diff --git a/lm-evaluation/build/lib/lm_eval/__init__.py b/lm-evaluation/build/lib/lm_eval/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0027b9a8e0568b44f358d26a157fe310d4178e32 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/__init__.py @@ -0,0 +1,4 @@ +from .evaluator import evaluate, simple_evaluate + +import habana_frameworks.torch.gpu_migration +import habana_frameworks.torch.core as htcore diff --git a/lm-evaluation/build/lib/lm_eval/__main__.py b/lm-evaluation/build/lib/lm_eval/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..2656c3633ef29f0f5f22d6ad5b0bb1cfacbff2f5 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/__main__.py @@ -0,0 +1,417 @@ +import argparse +import json +import logging +import os +import re +import sys +from functools import partial +from pathlib import Path +from typing import Union + +import numpy as np + +from lm_eval import evaluator, utils +from lm_eval.evaluator import request_caching_arg_to_dict +from lm_eval.logging_utils import WandbLogger +from lm_eval.tasks import TaskManager +from lm_eval.utils import make_table, simple_parse_args_string + + +DEFAULT_RESULTS_FILE = "results.json" + + +def _handle_non_serializable(o): + if isinstance(o, np.int64) or isinstance(o, np.int32): + return int(o) + elif isinstance(o, set): + return list(o) + else: + return str(o) + + +def _int_or_none_list_arg_type(max_len: int, value: str, split_char: str = ","): + def parse_value(item): + item = item.strip().lower() + if item == "none": + return None + try: + return int(item) + except ValueError: + raise argparse.ArgumentTypeError(f"{item} is not an integer or None") + + items = [parse_value(v) for v in value.split(split_char)] + num_items = len(items) + + if num_items == 1: + # Makes downstream handling the same for single and multiple values + items = items * max_len + elif num_items != max_len: + raise argparse.ArgumentTypeError( + f"Argument requires {max_len} integers or None, separated by '{split_char}'" + ) + + return items + + +def check_argument_types(parser: argparse.ArgumentParser): + """ + Check to make sure all CLI args are typed, raises error if not + """ + for action in parser._actions: + if action.dest != "help" and not action.const: + if action.type is None: + raise ValueError( + f"Argument '{action.dest}' doesn't have a type specified." + ) + else: + continue + + +def setup_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument( + "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`" + ) + parser.add_argument( + "--tasks", + "-t", + default=None, + type=str, + metavar="task1,task2", + help="To get full list of tasks, use the command lm-eval --tasks list", + ) + parser.add_argument( + "--model_args", + "-a", + default="", + type=str, + help="Comma separated string arguments for model, e.g. `pretrained=EleutherAI/pythia-160m,dtype=float32`", + ) + parser.add_argument( + "--num_fewshot", + "-f", + type=int, + default=None, + metavar="N", + help="Number of examples in few-shot context", + ) + parser.add_argument( + "--batch_size", + "-b", + type=str, + default=1, + metavar="auto|auto:N|N", + help="Acceptable values are 'auto', 'auto:N' or N, where N is an integer. 
Default 1.", + ) + parser.add_argument( + "--max_batch_size", + type=int, + default=None, + metavar="N", + help="Maximal batch size to try with --batch_size auto.", + ) + parser.add_argument( + "--device", + type=str, + default=None, + help="Device to use (e.g. cuda, cuda:0, cpu).", + ) + parser.add_argument( + "--output_path", + "-o", + default=None, + type=str, + metavar="DIR|DIR/file.json", + help="The path to the output file where the result metrics will be saved. If the path is a directory and log_samples is true, the results will be saved in the directory. Else the parent directory will be used.", + ) + parser.add_argument( + "--limit", + "-L", + type=float, + default=None, + metavar="N|0 argparse.Namespace: + check_argument_types(parser) + return parser.parse_args() + + +def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None: + if not args: + # we allow for args to be passed externally, else we parse them ourselves + parser = setup_parser() + args = parse_eval_args(parser) + + if args.wandb_args: + wandb_logger = WandbLogger(**simple_parse_args_string(args.wandb_args)) + + eval_logger = utils.eval_logger + eval_logger.setLevel(getattr(logging, f"{args.verbosity}")) + eval_logger.info(f"Verbosity set to {args.verbosity}") + os.environ["TOKENIZERS_PARALLELISM"] = "false" + + if args.predict_only: + args.log_samples = True + if (args.log_samples or args.predict_only) and not args.output_path: + raise ValueError( + "Specify --output_path if providing --log_samples or --predict_only" + ) + + if args.include_path is not None: + eval_logger.info(f"Including path: {args.include_path}") + task_manager = TaskManager(args.verbosity, include_path=args.include_path) + + if args.limit: + eval_logger.warning( + " --limit SHOULD ONLY BE USED FOR TESTING." + "REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT." + ) + + if args.tasks is None: + eval_logger.error("Need to specify task to evaluate.") + sys.exit() + elif args.tasks == "list": + eval_logger.info( + "Available Tasks:\n - {}".format("\n - ".join(task_manager.all_tasks)) + ) + sys.exit() + else: + if os.path.isdir(args.tasks): + import glob + + task_names = [] + yaml_path = os.path.join(args.tasks, "*.yaml") + for yaml_file in glob.glob(yaml_path): + config = utils.load_yaml_config(yaml_file) + task_names.append(config) + else: + task_list = args.tasks.split(",") + task_names = task_manager.match_tasks(task_list) + for task in [task for task in task_list if task not in task_names]: + if os.path.isfile(task): + config = utils.load_yaml_config(task) + task_names.append(config) + task_missing = [ + task for task in task_list if task not in task_names and "*" not in task + ] # we don't want errors if a wildcard ("*") task name was used + + if task_missing: + missing = ", ".join(task_missing) + eval_logger.error( + f"Tasks were not found: {missing}\n" + f"{utils.SPACING}Try `lm-eval --tasks list` for list of available tasks", + ) + raise ValueError( + f"Tasks not found: {missing}. Try `lm-eval --tasks list` for list of available tasks, or '--verbosity DEBUG' to troubleshoot task registration issues." + ) + + if args.output_path: + path = Path(args.output_path) + # check if file or 'dir/results.json' exists + if path.is_file(): + raise FileExistsError(f"File already exists at {path}") + output_path_file = path.joinpath(DEFAULT_RESULTS_FILE) + if output_path_file.is_file(): + eval_logger.warning( + f"File {output_path_file} already exists. Results will be overwritten." 
+ ) + # if path json then get parent dir + elif path.suffix in (".json", ".jsonl"): + output_path_file = path + path.parent.mkdir(parents=True, exist_ok=True) + path = path.parent + else: + path.mkdir(parents=True, exist_ok=True) + + # Respect user's value passed in via CLI, otherwise default to True and add to comma-separated model args + if args.trust_remote_code: + os.environ["HF_DATASETS_TRUST_REMOTE_CODE"] = str(args.trust_remote_code) + args.model_args = ( + args.model_args + + f",trust_remote_code={os.environ['HF_DATASETS_TRUST_REMOTE_CODE']}" + ) + + eval_logger.info(f"Selected Tasks: {task_names}") + + request_caching_args = request_caching_arg_to_dict( + cache_requests=args.cache_requests + ) + + results = evaluator.simple_evaluate( + model=args.model, + model_args=args.model_args, + tasks=task_names, + num_fewshot=args.num_fewshot, + batch_size=args.batch_size, + max_batch_size=args.max_batch_size, + device=args.device, + use_cache=args.use_cache, + limit=args.limit, + check_integrity=args.check_integrity, + write_out=args.write_out, + log_samples=args.log_samples, + gen_kwargs=args.gen_kwargs, + task_manager=task_manager, + verbosity=args.verbosity, + predict_only=args.predict_only, + random_seed=args.seed[0], + numpy_random_seed=args.seed[1], + torch_random_seed=args.seed[2], + **request_caching_args, + ) + + if results is not None: + if args.log_samples: + samples = results.pop("samples") + dumped = json.dumps( + results, indent=2, default=_handle_non_serializable, ensure_ascii=False + ) + if args.show_config: + print(dumped) + + batch_sizes = ",".join(map(str, results["config"]["batch_sizes"])) + + # Add W&B logging + if args.wandb_args: + try: + wandb_logger.post_init(results) + wandb_logger.log_eval_result() + if args.log_samples: + wandb_logger.log_eval_samples(samples) + except Exception as e: + eval_logger.info(f"Logging to Weights and Biases failed due to {e}") + + if args.output_path: + output_path_file.open("w", encoding="utf-8").write(dumped) + + if args.log_samples: + for task_name, config in results["configs"].items(): + output_name = "{}_{}".format( + re.sub(r"[\"<>:/\|\\?\*\[\]]+", "__", args.model_args), + task_name, + ) + filename = path.joinpath(f"{output_name}.jsonl") + samples_dumped = json.dumps( + samples[task_name], + indent=2, + default=_handle_non_serializable, + ensure_ascii=False, + ) + filename.write_text(samples_dumped, encoding="utf-8") + + print( + f"{args.model} ({args.model_args}), gen_kwargs: ({args.gen_kwargs}), limit: {args.limit}, num_fewshot: {args.num_fewshot}, " + f"batch_size: {args.batch_size}{f' ({batch_sizes})' if batch_sizes else ''}" + ) + print(make_table(results)) + if "groups" in results: + print(make_table(results, "groups")) + + if args.wandb_args: + # Tear down wandb run once all the logging is done. 
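# --- Editor's illustrative sketch (not part of the patch above) ---
# How the per-task sample file names written a few lines above are derived: the
# regex replaces filesystem-unsafe characters in the model_args string with "__"
# before the task name is appended. The model_args value is the one used as an
# example in the CLI help; the task name is hypothetical.
import re

model_args = "pretrained=EleutherAI/pythia-160m,dtype=float32"
task_name = "lambada_openai"
output_name = "{}_{}".format(re.sub(r"[\"<>:/\|\\?\*\[\]]+", "__", model_args), task_name)
print(output_name)  # pretrained=EleutherAI__pythia-160m,dtype=float32_lambada_openai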
+ wandb_logger.run.finish() + + +if __name__ == "__main__": + cli_evaluate() diff --git a/lm-evaluation/build/lib/lm_eval/caching/cache.py b/lm-evaluation/build/lib/lm_eval/caching/cache.py new file mode 100644 index 0000000000000000000000000000000000000000..63691435215a05894d206f3f8218ab23c5d2e250 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/caching/cache.py @@ -0,0 +1,55 @@ +import hashlib +import os + +import dill + +from lm_eval.utils import eval_logger + + +MODULE_DIR = os.path.dirname(os.path.realpath(__file__)) + +OVERRIDE_PATH = os.getenv("LM_HARNESS_CACHE_PATH") + + +PATH = OVERRIDE_PATH if OVERRIDE_PATH else f"{MODULE_DIR}/.cache" + +# This should be sufficient for uniqueness +HASH_INPUT = "EleutherAI-lm-evaluation-harness" + +HASH_PREFIX = hashlib.sha256(HASH_INPUT.encode("utf-8")).hexdigest() + +FILE_SUFFIX = f".{HASH_PREFIX}.pickle" + + +def load_from_cache(file_name): + try: + path = f"{PATH}/{file_name}{FILE_SUFFIX}" + + with open(path, "rb") as file: + cached_task_dict = dill.loads(file.read()) + return cached_task_dict + + except Exception: + eval_logger.debug(f"{file_name} is not cached, generating...") + pass + + +def save_to_cache(file_name, obj): + if not os.path.exists(PATH): + os.mkdir(PATH) + + file_path = f"{PATH}/{file_name}{FILE_SUFFIX}" + + eval_logger.debug(f"Saving {file_path} to cache...") + with open(file_path, "wb") as file: + file.write(dill.dumps(obj)) + + +# NOTE the "key" param is to allow for flexibility +def delete_cache(key: str = ""): + files = os.listdir(PATH) + + for file in files: + if file.startswith(key) and file.endswith(FILE_SUFFIX): + file_path = f"{PATH}/{file}" + os.unlink(file_path) diff --git a/lm-evaluation/build/lib/lm_eval/evaluator.py b/lm-evaluation/build/lib/lm_eval/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..f0f3ddf79adaf00781550f75d3be9922b73703d3 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/evaluator.py @@ -0,0 +1,583 @@ +import itertools +import logging +import random +import time +from collections import defaultdict +from typing import TYPE_CHECKING, List, Optional, Union + +import numpy as np +import torch + +import lm_eval.api.metrics +import lm_eval.api.registry +import lm_eval.models +from lm_eval.caching.cache import delete_cache +from lm_eval.evaluator_utils import ( + consolidate_results, + get_sample_size, + get_task_list, + prepare_print_tasks, + print_writeout, + run_task_tests, +) +from lm_eval.logging_utils import add_env_info, get_git_commit_hash +from lm_eval.tasks import TaskManager, get_task_dict +from lm_eval.utils import eval_logger, positional_deprecated, simple_parse_args_string + + +if TYPE_CHECKING: + from lm_eval.api.model import LM + from lm_eval.tasks import Task + + +@positional_deprecated +def simple_evaluate( + model, + model_args: Optional[Union[str, dict]] = None, + tasks: Optional[List[Union[str, dict, object]]] = None, + num_fewshot: Optional[int] = None, + batch_size: Optional[int] = None, + max_batch_size: Optional[int] = None, + device: Optional[str] = None, + use_cache: Optional[str] = None, + cache_requests: bool = False, + rewrite_requests_cache: bool = False, + delete_requests_cache: bool = False, + limit: Optional[Union[int, float]] = None, + bootstrap_iters: int = 100000, + check_integrity: bool = False, + write_out: bool = False, + log_samples: bool = True, + gen_kwargs: Optional[str] = None, + task_manager: Optional[TaskManager] = None, + verbosity: str = "INFO", + predict_only: bool = False, + random_seed: int = 0, + 
numpy_random_seed: int = 1234, + torch_random_seed: int = 1234, +): + """Instantiate and evaluate a model on a list of tasks. + + :param model: Union[str, LM] + Name of model or LM object, see lm_eval.models.get_model + :param model_args: Optional[str, dict] + String or dict arguments for each model class, see LM.create_from_arg_string and LM.create_from_arg_object. + Ignored if `model` argument is a LM object. + :param tasks: list[Union[str, dict, Task]] + List of task names or Task objects. Task objects will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise. + :param num_fewshot: int + Number of examples in few-shot context + :param batch_size: int or str, optional + Batch size for model + :param max_batch_size: int, optional + Maximal batch size to try with automatic batch size detection + :param device: str, optional + PyTorch device (e.g. "cpu" or "cuda:0") for running models + :param use_cache: str, optional + A path to a sqlite db file for caching model responses. `None` if not caching. + :param cache_requests: bool, optional + Speed up evaluation by caching the building of dataset requests. `None` if not caching. + :param rewrite_requests_cache: bool, optional + Rewrites all of the request cache if set to `True`. `None` if not desired. + :param delete_requests_cache: bool, optional + Deletes all of the request cache if set to `True`. `None` if not desired. + :param limit: int or float, optional + Limit the number of examples per task (only use this for testing), If <1, limit is a percentage of the total number of examples. + :param bootstrap_iters: + Number of iterations for bootstrap statistics + :param check_integrity: bool + Whether to run the relevant part of the test suite for the tasks + :param write_out: bool + If True, write out an example document and model input for checking task integrity + :param log_samples: bool + If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis + :param gen_kwargs: str + String arguments for model generation + Ignored for all tasks with loglikelihood output_type + :param predict_only: bool + If true only model outputs will be generated and returned. Metrics will not be evaluated + :param random_seed: int + Random seed for python's random module. If set to None, the seed will not be set. + :param numpy_random_seed: int + Random seed for numpy. If set to None, the seed will not be set. + :param torch_random_seed: int + Random seed for torch. If set to None, the seed will not be set. + + :return + Dictionary of results + """ + eval_logger.setLevel(getattr(logging, f"{verbosity}")) + start_date = time.time() + + if delete_requests_cache: + eval_logger.info("Deleting requests cache...") + delete_cache() + + seed_message = [] + if random_seed is not None: + # See https://github.com/EleutherAI/lm-evaluation-harness/pull/1412 + seed_message.append(f"Setting random seed to {random_seed}") + random.seed(random_seed) + + if numpy_random_seed is not None: + seed_message.append(f"Setting numpy seed to {numpy_random_seed}") + np.random.seed(numpy_random_seed) + + if torch_random_seed is not None: + seed_message.append(f"Setting torch manual seed to {torch_random_seed}") + torch.manual_seed(torch_random_seed) + + if seed_message: + eval_logger.info(" | ".join(seed_message)) + + if tasks is None: + tasks = [] + if len(tasks) == 0: + raise ValueError( + "No tasks specified, or no tasks found. Please verify the task names." 
+ ) + + if gen_kwargs is not None: + gen_kwargs = simple_parse_args_string(gen_kwargs) + eval_logger.warning( + "generation_kwargs specified through cli, these settings will update set parameters in yaml tasks. " + "Ensure 'do_sample=True' for non-greedy decoding!" + ) + if gen_kwargs == "": + gen_kwargs = None + + if isinstance(model, str): + if model_args is None: + eval_logger.warning("model_args not specified. Using defaults.") + model_args = "" + if "pretrained" not in model_args and model in [ + "hf-auto", + "hf", + "huggingface", + "vllm", + ]: + eval_logger.warning( + "pretrained not specified. Using default pretrained=gpt2." + ) + + if isinstance(model_args, dict): + eval_logger.info( + f"Initializing {model} model, with arguments: {model_args}" + ) + lm = lm_eval.api.registry.get_model(model).create_from_arg_obj( + model_args, + { + "batch_size": batch_size, + "max_batch_size": max_batch_size, + "device": device, + }, + ) + + else: + eval_logger.info( + f"Initializing {model} model, with arguments: {simple_parse_args_string(model_args)}" + ) + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + model_args, + { + "batch_size": batch_size, + "max_batch_size": max_batch_size, + "device": device, + }, + ) + else: + if not isinstance(model, lm_eval.api.model.LM): + raise TypeError + eval_logger.info("Using pre-initialized model") + lm = model + + if use_cache is not None: + eval_logger.info(f"Using cache at {use_cache + '_rank' + str(lm.rank) + '.db'}") + lm = lm_eval.api.model.CachingLM( + lm, + use_cache + # each rank receives a different cache db. + # necessary to avoid multiple writes to cache at once + + "_rank" + + str(lm.rank) + + ".db", + ) + + if task_manager is None: + task_manager = TaskManager(verbosity) + + task_dict = get_task_dict(tasks, task_manager) + for task_name in task_dict.keys(): + task_obj = task_dict[task_name] + if isinstance(task_obj, tuple): + _, task_obj = task_obj + if task_obj is None: + continue + + if task_obj.get_config("output_type") == "generate_until": + if gen_kwargs is not None: + task_obj.set_config( + key="generation_kwargs", value=gen_kwargs, update=True + ) + + if predict_only: + log_samples = True + eval_logger.info( + f"Processing {task_name} in output-only mode. Metrics will not be calculated!" + ) + # we have to change the class properties post-hoc. This is pretty hacky. + task_obj.override_metric(metric_name="bypass") + + # override tasks' fewshot values to the provided num_fewshot arg value + # except if tasks have it set to 0 manually in their configs--then we should never overwrite that + if num_fewshot is not None: + if (default_num_fewshot := task_obj.get_config("num_fewshot")) == 0: + eval_logger.info( + f"num_fewshot has been set to 0 for {task_name} in its config. Manual configuration will be ignored." 
+ ) + else: + eval_logger.warning( + f"Overwriting default num_fewshot of {task_name} from {default_num_fewshot} to {num_fewshot}" + ) + task_obj.set_config(key="num_fewshot", value=num_fewshot) + else: + # if num_fewshot not provided, and the task does not define a default one, default to 0 + if (default_num_fewshot := task_obj.get_config("num_fewshot")) is None: + task_obj.set_config(key="num_fewshot", value=0) + + if check_integrity: + run_task_tests(task_list=tasks) + + results = evaluate( + lm=lm, + task_dict=task_dict, + limit=limit, + cache_requests=cache_requests, + rewrite_requests_cache=rewrite_requests_cache, + bootstrap_iters=bootstrap_iters, + write_out=write_out, + log_samples=log_samples, + verbosity=verbosity, + ) + + if lm.rank == 0: + if isinstance(model, str): + model_name = model + elif hasattr(model, "config") and hasattr(model.config, "_name_or_path"): + model_name = model.config._name_or_path + else: + model_name = type(model).__name__ + + # add info about the model and few shot config + results["config"] = { + "model": model_name, + "model_args": model_args, + "batch_size": batch_size, + "batch_sizes": ( + list(lm.batch_sizes.values()) if hasattr(lm, "batch_sizes") else [] + ), + "device": device, + "use_cache": use_cache, + "limit": limit, + "bootstrap_iters": bootstrap_iters, + "gen_kwargs": gen_kwargs, + } + results["git_hash"] = get_git_commit_hash() + results["date"] = start_date + add_env_info(results) # additional environment info to results + return results + else: + return None + + +@positional_deprecated +def evaluate( + lm: "LM", + task_dict, + limit: Optional[int] = None, + cache_requests: bool = False, + rewrite_requests_cache: bool = False, + bootstrap_iters: Optional[int] = 100000, + write_out: bool = False, + log_samples: bool = True, + verbosity: str = "INFO", +): + """Instantiate and evaluate a model on a list of tasks. + + :param lm: obj + Language Model + :param task_dict: dict[str, Task] + Dictionary of tasks. Tasks will be taken to have name type(task).config.task . + :param limit: int, optional + Limit the number of examples per task (only use this for testing) + :param bootstrap_iters: + Number of iterations for bootstrap statistics + :param write_out: bool + If True, write out an example document and model input for checking task integrity + :param log_samples: bool + If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis + :return + Dictionary of results + """ + + eval_logger.setLevel(getattr(logging, f"{verbosity}")) + + # tracks all Instances/requests a model must generate output on. + requests = defaultdict(list) + # stores the amount to pad out reqs per req. 
type so that + # number of fwd passes per distributed rank is equal + padding_requests = defaultdict(int) + + # get lists of group hierarchy and each type of request + task_hierarchy, eval_tasks = get_task_list(task_dict) + if not log_samples: + if not all( + "bypass" not in getattr(task_output.task, "_metric_fn_list", {}).keys() + for task_output in eval_tasks + ): + raise ValueError("log_samples must be True for 'bypass' metric-only tasks") + for task_output in eval_tasks: + task: Task = task_output.task + limit = get_sample_size(task, limit) + task.build_all_requests( + limit=limit, + rank=lm.rank, + world_size=lm.world_size, + cache_requests=cache_requests, + rewrite_requests_cache=rewrite_requests_cache, + ) + eval_logger.debug( + f"Task: {task_output.task_name}; number of requests on this rank: {len(task.instances)}" + ) + + if write_out: + print_writeout(task) + # aggregate Instances by LM method requested to get output. + for instance in task.instances: + reqtype = instance.request_type + requests[reqtype].append(instance) + + if lm.world_size > 1: + instances_rnk = torch.tensor(len(task._instances), device=lm.device) + gathered_item = ( + lm.accelerator.gather(instances_rnk).cpu().detach().numpy().tolist() + ) + # "multiple_choice" task types dispatch (several) "loglikelihood" request types + reqtype = ( + "loglikelihood" + if task.OUTPUT_TYPE == "multiple_choice" + else task.OUTPUT_TYPE + ) + # compute number of pseudo-batches to pad with (FSDP/DDP require even batches among ranks) + numpad = max(gathered_item) - gathered_item[lm.rank] + # todo: may not account for padding in cases like SquadV2 which has multiple req types + padding_requests[reqtype] += numpad + + ### Run LM on inputs, get all outputs ### + # execute each type of request + for reqtype, reqs in requests.items(): + eval_logger.info(f"Running {reqtype} requests") + # create `K` copies of each request `req` based off `K = req.repeats` + cloned_reqs = [] + for req in reqs: + cloned_reqs.extend([req] * req.repeats) + + if (lm.world_size > 1) and (padding_requests[reqtype] > 0): + for _ in range(padding_requests[reqtype]): + cloned_reqs.extend([req] * req.repeats) + + # run requests through model + resps = getattr(lm, reqtype)(cloned_reqs) + + # put responses from model into a list of length K for each request. + for x, req in zip(resps, cloned_reqs): + req.resps.append(x) + + if lm.world_size > 1: + lm.accelerator.wait_for_everyone() + + RANK = lm.rank + WORLD_SIZE = lm.world_size + ### Postprocess outputs ### + # TODO: del model here, maybe (idea: allow user to specify device of e.g. 
reward model separately) + for task_output in eval_tasks: + task = task_output.task + task.apply_filters() + + ### Collect values of metrics on all datapoints ### + # # unpack results and sort back in order and return control to Task + # TODO: make it possible to use a different metric per filter + # Pre-process task.instances to group by doc_id + instances_by_doc_id = defaultdict(list) + for instance in task.instances: + instances_by_doc_id[instance.doc_id].append(instance) + # Sort instances within each group + for instances in instances_by_doc_id.values(): + instances.sort(key=lambda x: x.idx) + # iterate over different filters used + for filter_key in task.instances[0].filtered_resps.keys(): + doc_iterator = task.doc_iterator( + rank=RANK, limit=limit, world_size=WORLD_SIZE + ) + for doc_id, doc in doc_iterator: + requests = instances_by_doc_id[doc_id] + metrics = task.process_results( + doc, [req.filtered_resps[filter_key] for req in requests] + ) + if log_samples: + target = task.doc_to_target(doc) + example = { + "doc_id": doc_id, + "doc": doc, + "target": target, + "arguments": [req.args for req in requests], + "resps": [req.resps for req in requests], + "filtered_resps": [ + req.filtered_resps[filter_key] for req in requests + ], + } + example.update(metrics) + task_output.logged_samples.append(example) + for metric, value in metrics.items(): + task_output.sample_metrics[(metric, filter_key)].append(value) + + if WORLD_SIZE > 1: + # if multigpu, then gather data across all ranks to rank 0 + # first gather logged samples across all ranks + for task_output in eval_tasks: + if log_samples: + # for task_name, task_samples in list(samples.items()): + full_samples = [None] * WORLD_SIZE + torch.distributed.all_gather_object( + obj=task_output.logged_samples, + object_list=full_samples, + ) + + if RANK == 0: + task_output.logged_samples = list( + itertools.chain.from_iterable(full_samples) + ) + + # then collect metrics across all ranks + for metrics in task_output.sample_metrics: + metric_list = [None] * WORLD_SIZE + torch.distributed.all_gather_object( + obj=task_output.sample_metrics[metrics], + object_list=metric_list, + ) + if RANK == 0: + task_output.sample_metrics[metrics] = list( + itertools.chain.from_iterable(metric_list) + ) + + if RANK == 0: + ### Aggregate results over all datapoints ### + # aggregate results ; run bootstrap CIs + for task_output in eval_tasks: + task_output.calculate_aggregate_metric(bootstrap_iters=bootstrap_iters) + results, samples, configs, versions, num_fewshot = consolidate_results( + eval_tasks + ) + + ### Calculate group metrics ### + if bool(results): + for group, task_list in reversed(task_hierarchy.items()): + if len(task_list) == 0: + # task_hierarchy entries are either + # `group_name: [subtask1, subtask2, ...]` + # or `task_name: []`. + # we only want to operate on groups here. + continue + metric_list = list( + { + key + for task in task_list + for key in results[task].keys() + if "_stderr" not in key and key not in ["alias", "samples"] + } + ) + for metric in metric_list: + stderr = "_stderr,".join(metric.split(",")) + + # gather metrics, sizes, and stderrs from subtasks + metrics = [ + results[task][metric] + for task in task_list + if metric in results[task] + ] # TODO: copy? 
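# --- Editor's illustrative sketch (not part of the patch above) ---
# A minimal picture of how a group's pooled metric could be formed from the
# per-subtask values gathered here, assuming aggregate_subtask_metrics amounts
# to a sample-size-weighted mean; the real formula lives in lm_eval.api.metrics
# and the numbers below are made up.
def weighted_mean(metrics, sizes):
    # pool per-subtask scores, weighting each subtask by its sample count
    return sum(m * n for m, n in zip(metrics, sizes)) / sum(sizes)

print(weighted_mean([0.50, 0.80], [100, 300]))  # 0.725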
+ stderrs = [ + results[task][stderr] + for task in task_list + if stderr in results[task] + ] + sizes = [ + results[task]["samples"] + for task in task_list + if metric in results[task] + ] + + # compute group's pooled metric and stderr + results[group][ + metric + ] = lm_eval.api.metrics.aggregate_subtask_metrics(metrics, sizes) + # TODO: calculate grouped metric using aggregation fn + if "N/A" in stderrs: + results[group][stderr] = "N/A" + else: + results[group][ + stderr + ] = lm_eval.api.metrics.pooled_sample_stderr(stderrs, sizes) + # TODO: allow GroupConfigs to choose which variance formula is used, for back-compatibility + # To use the old (likely incorrect) variance formula, comment out the above and uncomment this line: + # results[group][stderr] = lm_eval.api.metrics.combined_sample_stderr(stderrs, sizes, metrics=metrics) + + results[group]["samples"] = sum(sizes) + + results_agg = defaultdict(dict) + groups_agg = defaultdict(dict) + all_tasks_list = list(task_hierarchy.keys()) + while True: + add_tasks_list = list(k for k in results_agg.keys()) + left_tasks_list = sorted(list(set(all_tasks_list) - set(add_tasks_list))) + if len(left_tasks_list) == 0: + break + + _task_hierarchy = { + k: v for k, v in task_hierarchy.items() if k in left_tasks_list + } + _results_agg, _groups_agg = prepare_print_tasks(_task_hierarchy, results) + + results_agg = {**results_agg, **_results_agg} + groups_agg = {**groups_agg, **_groups_agg} + + for group_name, task_list in task_hierarchy.items(): + if task_list: + num_fewshot[group_name] = num_fewshot[ + task_list[0] + ] # TODO: validate this + + results_dict = { + "results": dict(results_agg.items()), + **({"groups": dict(groups_agg.items())} if bool(groups_agg) else {}), + "group_subtasks": dict(reversed(task_hierarchy.items())), + "configs": dict(sorted(configs.items())), + "versions": dict(sorted(versions.items())), + "n-shot": dict(sorted(num_fewshot.items())), + } + if log_samples: + results_dict["samples"] = dict(samples) + + return results_dict + + else: + return None + + +def request_caching_arg_to_dict(cache_requests: str) -> dict: + request_caching_args = { + "cache_requests": cache_requests in {"true", "refresh"}, + "rewrite_requests_cache": cache_requests == "refresh", + "delete_requests_cache": cache_requests == "delete", + } + + return request_caching_args diff --git a/lm-evaluation/build/lib/lm_eval/evaluator_utils.py b/lm-evaluation/build/lib/lm_eval/evaluator_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fcb18206f6a089df11fe72d8c96ba5a8c0629e88 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/evaluator_utils.py @@ -0,0 +1,312 @@ +import collections +import math +import pathlib +import sys +from typing import Dict, List, Optional, Tuple, Union + +from lm_eval.api import metrics +from lm_eval.utils import eval_logger, positional_deprecated + + +class TaskOutput: + """ + Wrapper class for Task outputs.It contains various attributes and methods to manage and calculate metrics for the task. + + Attributes: + task (object): The task object. + task_name (str): The name of the task. + task_config (dict): The configuration of the task. + version (str): The version of the task. + group_name (str): The name of the task group. + n_shot (int): The number of shots for the task. + task_alias (str): The alias of the task. + group_alias (str): The alias of the task group. + is_group (bool): Indicates if the task is a group. + logged_samples (list): The list of logged samples. 
+ sample_len (int): The length of the samples. + sample_metrics (defaultdict): The dictionary of samples' metrics. + agg_metrics (defaultdict): The dictionary of aggregate metrics. + + Methods: + from_taskdict(cls, task_name: str, task): + Creates a TaskOutput instance from a task dictionary. + + calculate_aggregate_metric(bootstrap_iters=100000) -> None: + Calculates the aggregate metrics for the task. + """ + + def __init__( + self, + task=None, + task_name=None, + task_config=None, + version=None, + group_name=None, + n_shot=None, + task_alias=None, + group_alias=None, + is_group=None, + ): + self.task = task + self.task_config = task_config + self.task_name = task_name + self.group_name = group_name + self.version = version + self.n_shot = n_shot + self.task_alias = task_alias + self.group_alias = group_alias + self.is_group = is_group + self.logged_samples = [] + self.sample_len = None + self.sample_metrics = collections.defaultdict(list) + self.agg_metrics = collections.defaultdict(list) + + @classmethod + def from_taskdict(cls, task_name: str, task): + if isinstance(task, tuple): + group_name, task = task + else: + group_name = None + if not task: + # these gets filtered out in get_task_list + # once they are added to group hierarchy + is_group = True + return cls( + task=task, task_name=task_name, is_group=is_group, group_name=group_name + ) + version = task.VERSION + task_config = dict(task.dump_config()) + if (n_shot := task_config.get("num_fewshot")) == 0: + n_shot = task_config.get("metadata", {}).get("num_fewshot", 0) + task_alias = task_config.get("alias") + group_alias = task_config.get("group_alias") + return cls( + task=task, + task_name=task_name, + task_config=task_config, + group_name=group_name, + version=version, + n_shot=n_shot, + task_alias=task_alias, + group_alias=group_alias, + ) + + def calculate_aggregate_metric(self, bootstrap_iters=100000) -> None: + for (metric, filter_key), items in self.sample_metrics.items(): + agg_fn = self.task.aggregation()[metric] + metric_key = f"{metric},{filter_key}" + self.agg_metrics[metric_key] = agg_fn(items) + self.sample_len = len(items) # TODO: same sample size for each metric? 
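# --- Editor's illustrative note (not part of the patch above) ---
# Aggregated values are keyed as "<metric>,<filter>" and their uncertainties as
# "<metric>_stderr,<filter>", matching the keys built just above and below;
# "acc" and the default "none" filter are example names only.
metric, filter_key = "acc", "none"
metric_key = f"{metric},{filter_key}"         # "acc,none"
stderr_key = f"{metric}_stderr,{filter_key}"  # "acc_stderr,none"
print(metric_key, stderr_key)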
+ if bootstrap_iters: + stderr_fn = metrics.stderr_for_metric( + metric=agg_fn, + bootstrap_iters=min(bootstrap_iters, 100) + if metric in ["bleu", "chrf", "ter"] + else bootstrap_iters, + ) + self.agg_metrics[f"{metric}_stderr,{filter_key}"] = ( + stderr_fn(items) if (stderr_fn and len(items) > 1) else "N/A" + ) + + def __repr__(self): + return ( + f"TaskOutput(task_name={self.task_name}, " + f"group_name={self.group_name}, " + f"version={self.version}," + f"n_shot={self.n_shot}" + f"task_alias={self.task_alias}, group_alias={self.group_alias})" + ) + + +def get_task_list(task_dict: dict) -> Tuple[Dict[str, list], List[TaskOutput]]: + task_hierarchy = collections.defaultdict(list) + outputs = list(TaskOutput.from_taskdict(x, y) for x, y in task_dict.items()) + for task_output in outputs: + if group_name := task_output.group_name: + task_hierarchy[group_name].append(task_output.task_name) + else: + task_hierarchy[task_output.task_name] = [] + # returns task_hierarchy tracking which groups contain which subtasks, + # and a list of TaskOutput classes for each non-group subtask + return task_hierarchy, [x for x in outputs if x.task] + + +def print_writeout(task) -> None: + for inst in task.instances: + # print the prompt for the first few documents + if inst.doc_id < 1: + eval_logger.info( + f"Task: {task}; document {inst.doc_id}; context prompt (starting on next line):\ + \n{inst.args[0]}\n(end of prompt on previous line)\ntarget string or answer choice index (starting on next line):\n{task.doc_to_target(inst.doc)}\n(end of target on previous line)" + ) + eval_logger.info(f"Request: {str(inst)}") + + +def get_sample_size(task, limit: Optional[int]) -> Union[int, None]: + if limit is not None: + limit = ( + int(math.ceil(len(task.eval_docs) * limit)) if limit < 1.0 else int(limit) + ) + return limit + + +def prepare_print_tasks( + task_hierarchy: dict, results: dict, tab=0 +) -> Tuple[dict, dict]: + """ + @param task_hierarchy: Dictionary representing the group hierarchy of tasks. Each key is a group name and its + value is a list of task names. + @param results: Dictionary containing the results of each task. Each key is a + group name and its value is a dictionary of task results. + @param tab: The indentation level for printing the task + hierarchy. Default is 0. + @return: A tuple of two dictionaries: results_agg and groups_agg. results_agg contains + aggregated results for each task, and groups_agg contains aggregated results for each group. + + Prepares the task hierarchy and aggregates the results for each task and group recursively for printing. 
+ """ + results_agg = collections.defaultdict(dict) + groups_agg = collections.defaultdict(dict) + + (group_name, task_list), *_ = task_hierarchy.items() + task_list = sorted(task_list) + + results_agg[group_name] = results[group_name].copy() + # results_agg[group_name]["tab"] = tab + if "samples" in results_agg[group_name]: + results_agg[group_name].pop("samples") + + tab_string = " " * tab + "- " if tab > 0 else "" + + if "alias" in results_agg[group_name]: + results_agg[group_name]["alias"] = tab_string + results_agg[group_name]["alias"] + else: + results_agg[group_name]["alias"] = tab_string + group_name + + if len(task_list) > 0: + groups_agg[group_name] = results[group_name].copy() + # groups_agg[group_name]["tab"] = tab + if "samples" in groups_agg[group_name]: + groups_agg[group_name].pop("samples") + + if "alias" in groups_agg[group_name]: + groups_agg[group_name]["alias"] = ( + tab_string + groups_agg[group_name]["alias"] + ) + else: + groups_agg[group_name]["alias"] = tab_string + group_name + + for task_name in task_list: + if task_name in task_hierarchy: + _task_hierarchy = { + **{task_name: task_hierarchy[task_name]}, + **task_hierarchy, + } + else: + _task_hierarchy = { + **{task_name: []}, + **task_hierarchy, + } + + _results_agg, _groups_agg = prepare_print_tasks( + _task_hierarchy, results, tab + 1 + ) + results_agg = {**results_agg, **_results_agg} + groups_agg = {**groups_agg, **_groups_agg} + + return results_agg, groups_agg + + +def consolidate_results( + eval_tasks: List[TaskOutput], +) -> Tuple[dict, dict, dict, dict, dict]: + """ + @param eval_tasks: list(TaskOutput). + @return: A tuple containing the consolidated results, samples, configs, versions, and num_fewshot. + + Consolidates the results of multiple evaluation tasks into a single structure. + + The method iterates over each evaluation instance and extracts relevant information to create the consolidated + results structure. The consolidated results structure has the following properties: + + - results: A defaultdict with task names as keys and dictionaries as values. Each dictionary contains + metric/filter pairs as keys and corresponding metric values as values. The "alias" key is used to store task + aliases specified in the task configuration. + - samples: A defaultdict with task names as keys and lists of log samples as values. + - configs: A defaultdict with task names as keys and task configurations as values. + - versions: A defaultdict with task names as keys and task versions as values. + - num_fewshot: A defaultdict with task names as keys and number of few-shot samples as values. + + The method then returns the consolidated results, samples, configs, versions, and num_fewshot as a tuple. + """ + # stores the final result for each task, for each metric/filter pair. + results = collections.defaultdict(dict) + # logs info about each document evaluated. + samples = collections.defaultdict(list) + # store num-fewshot value per task + num_fewshot = collections.defaultdict(int) + # Tracks the YAML configs of all chosen task + configs = collections.defaultdict(dict) + # Tracks each task's version. 
+ versions = collections.defaultdict(dict) + for task_output in eval_tasks: + if "task_alias" in (task_config := task_output.task_config): + results[task_output.task_name]["alias"] = task_config["task_alias"] + if group_alias := task_output.group_alias: + if group_alias not in results and (group_name := task_output.group_name): + results[group_name]["alias"] = group_alias + num_fewshot[task_output.task_name] = task_output.n_shot + configs[task_output.task_name] = task_output.task_config + versions[task_output.task_name] = task_output.version + samples[task_output.task_name] = task_output.logged_samples + for (metric, filter_key), items in task_output.sample_metrics.items(): + metric_key = f"{metric},{filter_key}" + results[task_output.task_name][metric_key] = task_output.agg_metrics[ + metric_key + ] + results[task_output.task_name]["samples"] = task_output.sample_len + results[task_output.task_name][ + f"{metric}_stderr,{filter_key}" + ] = task_output.agg_metrics[f"{metric}_stderr,{filter_key}"] + return results, samples, configs, versions, num_fewshot + + +@positional_deprecated +def find_test_root(start_path: pathlib.Path) -> pathlib.Path: + """ + Search upward in the directory tree to a maximum of three layers + to find and return the package root (containing the 'tests' folder) + """ + cur_path = start_path.resolve() + max_layers = 3 + for _ in range(max_layers): + if (cur_path / "tests" / "test_version_stable.py").exists(): + return cur_path + else: + cur_path = cur_path.parent.resolve() + raise FileNotFoundError( + f"Unable to find package root within {max_layers} upwards" + f"of {start_path}" + ) + + +@positional_deprecated +def run_task_tests(task_list: List[str]): + """ + Find the package root and run the tests for the given tasks + """ + import pytest + + package_root = find_test_root(start_path=pathlib.Path(__file__)) + task_string = " or ".join(task_list) + args = [ + f"{package_root}/tests/test_version_stable.py", + f"--rootdir={package_root}", + "-k", + f"{task_string}", + ] + sys.path.append(str(package_root)) + pytest_return_val = pytest.main(args) + if pytest_return_val: + raise ValueError( + f"Not all tests for the specified tasks ({task_list}) ran successfully! Error code: {pytest_return_val}" + ) diff --git a/lm-evaluation/build/lib/lm_eval/logging_utils.py b/lm-evaluation/build/lib/lm_eval/logging_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..24ab11dad6a1f6d77e467c3ef506f22070f7270f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/logging_utils.py @@ -0,0 +1,455 @@ +import copy +import json +import logging +import os +import re +import subprocess +from pathlib import Path +from typing import Any, Dict, List, Literal, Optional, Tuple, Union + +import numpy as np +import pandas as pd +from packaging.version import Version +from torch.utils.collect_env import get_pretty_env_info +from transformers import __version__ as trans_version + + +logger = logging.getLogger(__name__) + + +def remove_none_pattern(input_string: str) -> Tuple[str, bool]: + """Remove the ',none' substring from the input_string if it exists at the end. + + Args: + input_string (str): The input string from which to remove the ',none' substring. + + Returns: + Tuple[str, bool]: A tuple containing the modified input_string with the ',none' substring removed + and a boolean indicating whether the modification was made (True) or not (False). 
+ """ + # Define the pattern to match ',none' at the end of the string + pattern = re.compile(r",none$") + + # Use sub() to replace ',none' with an empty string + result = re.sub(pattern, "", input_string) + + # check if the input_string changed + removed = result != input_string + + return result, removed + + +def _handle_non_serializable(o: Any) -> Union[int, str, list]: + """Handle non-serializable objects by converting them to serializable types. + + Args: + o (Any): The object to be handled. + + Returns: + Union[int, str, list]: The converted object. If the object is of type np.int64 or np.int32, + it will be converted to int. If the object is of type set, it will be converted + to a list. Otherwise, it will be converted to str. + """ + if isinstance(o, np.int64) or isinstance(o, np.int32): + return int(o) + elif isinstance(o, set): + return list(o) + else: + return str(o) + + +def get_wandb_printer() -> Literal["Printer"]: + """Returns a wandb printer instance for pretty stdout.""" + from wandb.sdk.lib.printer import get_printer + from wandb.sdk.wandb_settings import Settings + + printer = get_printer(Settings()._jupyter) + return printer + + +class WandbLogger: + def __init__(self, **kwargs) -> None: + """Attaches to wandb logger if already initialized. Otherwise, passes kwargs to wandb.init() + + Args: + kwargs Optional[Any]: Arguments for configuration. + + Parse and log the results returned from evaluator.simple_evaluate() with: + wandb_logger.post_init(results) + wandb_logger.log_eval_result() + wandb_logger.log_eval_samples(results["samples"]) + """ + try: + import wandb + + assert Version(wandb.__version__) >= Version("0.13.6") + if Version(wandb.__version__) < Version("0.13.6"): + wandb.require("report-editing:v0") + except Exception as e: + logger.warning( + "To use the wandb reporting functionality please install wandb>=0.13.6.\n" + "To install the latest version of wandb run `pip install wandb --upgrade`\n" + f"{e}" + ) + + self.wandb_args: Dict[str, Any] = kwargs + + # initialize a W&B run + if wandb.run is None: + self.run = wandb.init(**self.wandb_args) + else: + self.run = wandb.run + + self.printer = get_wandb_printer() + + def post_init(self, results: Dict[str, Any]) -> None: + self.results: Dict[str, Any] = copy.deepcopy(results) + self.task_names: List[str] = list(results.get("results", {}).keys()) + self.group_names: List[str] = list(results.get("groups", {}).keys()) + + def _get_config(self) -> Dict[str, Any]: + """Get configuration parameters.""" + self.task_configs = self.results.get("configs", {}) + cli_configs = self.results.get("config", {}) + configs = { + "task_configs": self.task_configs, + "cli_configs": cli_configs, + } + + return configs + + def _sanitize_results_dict(self) -> Tuple[Dict[str, str], Dict[str, Any]]: + """Sanitize the results dictionary.""" + _results = copy.deepcopy(self.results.get("results", dict())) + + # Remove None from the metric string name + tmp_results = copy.deepcopy(_results) + for task_name in self.task_names: + task_result = tmp_results.get(task_name, dict()) + for metric_name, metric_value in task_result.items(): + _metric_name, removed = remove_none_pattern(metric_name) + if removed: + _results[task_name][_metric_name] = metric_value + _results[task_name].pop(metric_name) + + # remove string valued keys from the results dict + wandb_summary = {} + for task in self.task_names: + task_result = _results.get(task, dict()) + for metric_name, metric_value in task_result.items(): + if isinstance(metric_value, str): + 
wandb_summary[f"{task}/{metric_name}"] = metric_value + + for summary_metric, summary_value in wandb_summary.items(): + _task, _summary_metric = summary_metric.split("/") + _results[_task].pop(_summary_metric) + + tmp_results = copy.deepcopy(_results) + for task_name, task_results in tmp_results.items(): + for metric_name, metric_value in task_results.items(): + _results[f"{task_name}/{metric_name}"] = metric_value + _results[task_name].pop(metric_name) + for task in self.task_names: + _results.pop(task) + + return wandb_summary, _results + + def _log_results_as_table(self) -> None: + """Generate and log evaluation results as a table to W&B.""" + columns = [ + "Version", + "Filter", + "num_fewshot", + "Metric", + "Value", + "Stderr", + ] + + def make_table(columns: List[str], key: str = "results"): + import wandb + + table = wandb.Table(columns=columns) + results = copy.deepcopy(self.results) + + for k, dic in results.get(key).items(): + if k in self.group_names and not key == "groups": + continue + version = results.get("versions").get(k) + if version == "N/A": + version = None + n = results.get("n-shot").get(k) + + for (mf), v in dic.items(): + m, _, f = mf.partition(",") + if m.endswith("_stderr"): + continue + if m == "alias": + continue + + if m + "_stderr" + "," + f in dic: + se = dic[m + "_stderr" + "," + f] + if se != "N/A": + se = "%.4f" % se + table.add_data(*[k, version, f, n, m, str(v), str(se)]) + else: + table.add_data(*[k, version, f, n, m, str(v), ""]) + + return table + + # log the complete eval result to W&B Table + table = make_table(["Tasks"] + columns, "results") + self.run.log({"evaluation/eval_results": table}) + + if "groups" in self.results.keys(): + table = make_table(["Groups"] + columns, "groups") + self.run.log({"evaluation/group_eval_results": table}) + + def _log_results_as_artifact(self) -> None: + """Log results as JSON artifact to W&B.""" + import wandb + + dumped = json.dumps( + self.results, indent=2, default=_handle_non_serializable, ensure_ascii=False + ) + artifact = wandb.Artifact("results", type="eval_results") + with artifact.new_file("results.json", mode="w", encoding="utf-8") as f: + f.write(dumped) + self.run.log_artifact(artifact) + + def log_eval_result(self) -> None: + """Log evaluation results to W&B.""" + # Log configs to wandb + configs = self._get_config() + self.run.config.update(configs) + + wandb_summary, self.wandb_results = self._sanitize_results_dict() + # update wandb.run.summary with items that were removed + self.run.summary.update(wandb_summary) + # Log the evaluation metrics to wandb + self.run.log(self.wandb_results) + # Log the evaluation metrics as W&B Table + self._log_results_as_table() + # Log the results dict as json to W&B Artifacts + self._log_results_as_artifact() + + def _generate_dataset( + self, data: List[Dict[str, Any]], config: Dict[str, Any] + ) -> pd.DataFrame: + """Generate a dataset from evaluation data. + + Args: + data (List[Dict[str, Any]]): The data to generate a dataset for. + config (Dict[str, Any]): The configuration of the task. + + Returns: + pd.DataFrame: A dataframe that is ready to be uploaded to W&B. 
+ """ + ids = [x["doc_id"] for x in data] + labels = [x["target"] for x in data] + instance = [""] * len(ids) + resps = [""] * len(ids) + filtered_resps = [""] * len(ids) + model_outputs = {} + + metrics_list = config["metric_list"] + metrics = {} + for metric in metrics_list: + metric = metric.get("metric") + if metric in ["word_perplexity", "byte_perplexity", "bits_per_byte"]: + metrics[f"{metric}_loglikelihood"] = [x[metric][0] for x in data] + if metric in ["byte_perplexity", "bits_per_byte"]: + metrics[f"{metric}_bytes"] = [x[metric][1] for x in data] + else: + metrics[f"{metric}_words"] = [x[metric][1] for x in data] + else: + metrics[metric] = [x[metric] for x in data] + + if config["output_type"] == "loglikelihood": + instance = [x["arguments"][0][0] for x in data] + labels = [x["arguments"][0][1] for x in data] + resps = [ + f'log probability of continuation is {x["resps"][0][0][0]} ' + + "\n\n" + + "continuation will {} generated with greedy sampling".format( + "not be" if not x["resps"][0][0][1] else "be" + ) + for x in data + ] + filtered_resps = [ + f'log probability of continuation is {x["filtered_resps"][0][0]} ' + + "\n\n" + + "continuation will {} generated with greedy sampling".format( + "not be" if not x["filtered_resps"][0][1] else "be" + ) + for x in data + ] + elif config["output_type"] == "multiple_choice": + instance = [x["arguments"][0][0] for x in data] + choices = [ + "\n".join([f"{idx}. {y[1]}" for idx, y in enumerate(x["arguments"])]) + for x in data + ] + resps = [np.argmax([n[0][0] for n in x["resps"]]) for x in data] + filtered_resps = [ + np.argmax([n[0] for n in x["filtered_resps"]]) for x in data + ] + elif config["output_type"] == "loglikelihood_rolling": + instance = [x["arguments"][0][0] for x in data] + resps = [x["resps"][0][0] for x in data] + filtered_resps = [x["filtered_resps"][0] for x in data] + elif config["output_type"] == "generate_until": + instance = [x["arguments"][0][0] for x in data] + resps = [x["resps"][0][0] for x in data] + filtered_resps = [x["filtered_resps"][0] for x in data] + + model_outputs["raw_predictions"] = resps + model_outputs["filtered_predictions"] = filtered_resps + + df_data = { + "id": ids, + "data": instance, + } + if config["output_type"] == "multiple_choice": + df_data["choices"] = choices + + tmp_data = { + "input_len": [len(x) for x in instance], + "labels": labels, + "output_type": config["output_type"], + } + df_data.update(tmp_data) + df_data.update(model_outputs) + df_data.update(metrics) + + return pd.DataFrame(df_data) + + def _log_samples_as_artifact( + self, data: List[Dict[str, Any]], task_name: str + ) -> None: + import wandb + + # log the samples as an artifact + dumped = json.dumps( + data, + indent=2, + default=_handle_non_serializable, + ensure_ascii=False, + ) + artifact = wandb.Artifact(f"{task_name}", type="samples_by_task") + with artifact.new_file( + f"{task_name}_eval_samples.json", mode="w", encoding="utf-8" + ) as f: + f.write(dumped) + self.run.log_artifact(artifact) + # artifact.wait() + + def log_eval_samples(self, samples: Dict[str, List[Dict[str, Any]]]) -> None: + """Log evaluation samples to W&B. + + Args: + samples (Dict[str, List[Dict[str, Any]]]): Evaluation samples for each task. 
+ """ + task_names: List[str] = [ + x for x in self.task_names if x not in self.group_names + ] + + ungrouped_tasks = [] + tasks_by_groups = {} + + for task_name in task_names: + group_names = self.task_configs[task_name].get("group", None) + if group_names: + if isinstance(group_names, str): + group_names = [group_names] + + for group_name in group_names: + if not tasks_by_groups.get(group_name): + tasks_by_groups[group_name] = [task_name] + else: + tasks_by_groups[group_name].append(task_name) + else: + ungrouped_tasks.append(task_name) + + for task_name in ungrouped_tasks: + eval_preds = samples[task_name] + + # log the samples as a W&B Table + df = self._generate_dataset(eval_preds, self.task_configs.get(task_name)) + self.run.log({f"{task_name}_eval_results": df}) + + # log the samples as a json file as W&B Artifact + self._log_samples_as_artifact(eval_preds, task_name) + + for group, grouped_tasks in tasks_by_groups.items(): + grouped_df = pd.DataFrame() + for task_name in grouped_tasks: + eval_preds = samples[task_name] + df = self._generate_dataset( + eval_preds, self.task_configs.get(task_name) + ) + df["group"] = group + df["task"] = task_name + grouped_df = pd.concat([grouped_df, df], ignore_index=True) + + # log the samples as a json file as W&B Artifact + self._log_samples_as_artifact(eval_preds, task_name) + + self.run.log({f"{group}_eval_results": grouped_df}) + + +def get_commit_from_path(repo_path: Union[Path, str]) -> Optional[str]: + try: + git_folder = Path(repo_path, ".git") + if git_folder.is_file(): + git_folder = Path( + git_folder.parent, + git_folder.read_text(encoding="utf-8").split("\n")[0].split(" ")[-1], + ) + if Path(git_folder, "HEAD").exists(): + head_name = ( + Path(git_folder, "HEAD") + .read_text(encoding="utf-8") + .split("\n")[0] + .split(" ")[-1] + ) + head_ref = Path(git_folder, head_name) + git_hash = head_ref.read_text(encoding="utf-8").replace("\n", "") + else: + git_hash = None + except Exception as err: + logger.debug( + f"Failed to retrieve a Git commit hash from path: {str(repo_path)}. Error: {err}" + ) + return None + return git_hash + + +def get_git_commit_hash(): + """ + Gets the git commit hash of your current repo (if it exists). 
+ Source: https://github.com/EleutherAI/gpt-neox/blob/b608043be541602170bfcfb8ec9bf85e8a0799e0/megatron/neox_arguments/neox_args.py#L42 + """ + try: + git_hash = subprocess.check_output(["git", "describe", "--always"]).strip() + git_hash = git_hash.decode() + except (subprocess.CalledProcessError, FileNotFoundError): + # FileNotFoundError occurs when git not installed on system + git_hash = get_commit_from_path(os.getcwd()) # git hash of repo if exists + return git_hash + + +def add_env_info(storage: Dict[str, Any]): + try: + pretty_env_info = get_pretty_env_info() + except Exception as err: + pretty_env_info = str(err) + transformers_version = trans_version + upper_dir_commit = get_commit_from_path( + Path(os.getcwd(), "..") + ) # git hash of upper repo if exists + added_info = { + "pretty_env_info": pretty_env_info, + "transformers_version": transformers_version, + "upper_git_hash": upper_dir_commit, # in case this repo is submodule + } + storage.update(added_info) diff --git a/lm-evaluation/build/lib/lm_eval/tasks/__init__.py b/lm-evaluation/build/lib/lm_eval/tasks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..53a41e9c6c3fa9b6d26b7d5a2066f652a30844fc --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/__init__.py @@ -0,0 +1,446 @@ +import collections +import logging +import os +from functools import partial +from typing import Dict, List, Mapping, Optional, Union + +from lm_eval import utils +from lm_eval.api.task import ConfigurableTask, Task + + +class TaskManager: + """TaskManager indexes all tasks from the default `lm_eval/tasks/` + and an optional directory if provided. + + """ + + def __init__(self, verbosity="INFO", include_path: Optional[str] = None) -> None: + self.verbosity = verbosity + self.include_path = include_path + self.logger = utils.eval_logger + self.logger.setLevel(getattr(logging, f"{verbosity}")) + + self._task_index = self.initialize_tasks(include_path=include_path) + self._all_tasks = sorted(list(self._task_index.keys())) + + self.task_group_map = collections.defaultdict(list) + + def initialize_tasks(self, include_path: Optional[str] = None): + """Creates a dictionary of tasks index. 
+ + :param include_path: str = None + An additional path to be searched for tasks + + :return + Dictionary of task names as key and task metadata + """ + all_paths = [os.path.dirname(os.path.abspath(__file__)) + "/"] + if include_path is not None: + if isinstance(include_path, str): + include_path = [include_path] + all_paths.extend(include_path) + + task_index = {} + for task_dir in all_paths: + tasks = self._get_task_and_group(task_dir) + task_index = {**tasks, **task_index} + + return task_index + + @property + def all_tasks(self): + return self._all_tasks + + @property + def task_index(self): + return self._task_index + + def match_tasks(self, task_list): + return utils.pattern_match(task_list, self.all_tasks) + + def _name_is_registered(self, name) -> bool: + if name in self.all_tasks: + return True + return False + + def _name_is_task(self, name) -> bool: + if self._name_is_registered(name) and ("task" in self.task_index[name]["type"]): + return True + return False + + def _name_is_group(self, name) -> bool: + if self._name_is_registered(name) and ( + self.task_index[name]["type"] == "group" + ): + return True + return False + + def _name_is_python_task(self, name): + if self._name_is_registered(name) and ( + self.task_index[name]["type"] == "python_task" + ): + return True + return False + + def _config_is_task(self, config) -> bool: + if ("task" in config) and isinstance(config["task"], str): + return True + return False + + def _config_is_group(self, config) -> bool: + if ("task" in config) and isinstance(config["task"], list): + return True + return False + + def _config_is_python_task(self, config) -> bool: + if "class" in config: + return True + return False + + def _get_yaml_path(self, name): + if name not in self.task_index: + raise ValueError + return self.task_index[name]["yaml_path"] + + def _get_config(self, name): + if name not in self.task_index: + raise ValueError + yaml_path = self._get_yaml_path(name) + if yaml_path == -1: + return {} + else: + return utils.load_yaml_config(yaml_path, mode="full") + + def _get_tasklist(self, name): + if self._name_is_task(name): + raise ValueError + return self.task_index[name]["task"] + + def _process_alias(self, config, group=None): + # If the group is not the same as the original + # group which the group alias was intended for, + # Set the group_alias to None instead. 
+ if ("group_alias" in config) and ("group" in config) and group is not None: + if config["group"] != group: + config["group_alias"] = None + return config + + def _load_individual_task_or_group( + self, + name_or_config: Optional[Union[str, dict]] = None, + parent_name: Optional[str] = None, + update_config: Optional[dict] = None, + yaml_path: Optional[str] = None, + ) -> Mapping: + def load_task(config, task, group=None, yaml_path=None): + if "include" in config: + if yaml_path is None: + raise ValueError + config.update( + utils.load_yaml_config( + yaml_path, + yaml_config={"include": config.pop("include")}, + mode="full", + ) + ) + if self._config_is_python_task(config): + task_object = config["class"]() + else: + config = self._process_alias(config, group=group) + task_object = ConfigurableTask(config=config) + if group is not None: + task_object = (group, task_object) + return {task: task_object} + + if isinstance(name_or_config, str): + if update_config is not None: + # Process name_or_config as a dict instead + name_or_config = {"task": name_or_config, **update_config} + elif self._name_is_task(name_or_config): + task_config = self._get_config(name_or_config) + return load_task(task_config, task=name_or_config, group=parent_name) + else: + group_name = name_or_config + subtask_list = self._get_tasklist(name_or_config) + if subtask_list == -1: + group_config = self._get_config(name_or_config) + subtask_list = group_config["task"] + + # This checks if we're at the root. + if parent_name is None: + group_config = self._get_config(name_or_config) + if set(group_config.keys()) > {"task", "group"}: + update_config = { + k: v + for k, v in group_config.items() + if k not in ["task", "group"] + } + yaml_path = self._get_yaml_path(group_name) + + if (update_config is not None) and ("group_alias" in update_config): + group_name = update_config["group_alias"] + update_config.pop("group_alias") + + if isinstance(name_or_config, dict): + if update_config is not None: + name_or_config = { + **name_or_config, + **update_config, + } + + if self._config_is_task(name_or_config): + name = name_or_config["task"] + # If the name is registered as a group + # if self._name_is_task(name) is False: + if self._name_is_group(name): + group_name = name + update_config = { + k: v for k, v in name_or_config.items() if k != "task" + } + subtask_list = self._get_tasklist(name) + if subtask_list == -1: + subtask_list = self._get_config(name)["task"] + else: + if self._name_is_registered(name): + base_task_config = self._get_config(name) + + # Check if this is a duplicate. 
+ if parent_name is not None: + name_or_config["group"] = parent_name + num_duplicate = len( + list( + filter( + lambda x: x.startswith(name), + self.task_group_map[parent_name], + ) + ) + ) + if num_duplicate > 0: + name = f"{name}-{num_duplicate}" + self.task_group_map[parent_name].append(name) + + task_config = { + **base_task_config, + **name_or_config, + } + else: + task_config = name_or_config + return load_task( + task_config, task=name, group=parent_name, yaml_path=yaml_path + ) + else: + group_name = name_or_config["group"] + subtask_list = name_or_config["task"] + if set(name_or_config.keys()) > {"task", "group"}: + update_config = { + k: v + for k, v in name_or_config.items() + if k not in ["task", "group"] + } + + all_subtasks = {} + if parent_name is not None: + all_subtasks = {group_name: (parent_name, None)} + + fn = partial( + self._load_individual_task_or_group, + parent_name=group_name, + update_config=update_config, + yaml_path=yaml_path, + ) + all_subtasks = { + **all_subtasks, + **dict(collections.ChainMap(*map(fn, subtask_list))), + } + return all_subtasks + + def load_task_or_group(self, task_list: Optional[Union[str, list]] = None) -> dict: + """Loads a dictionary of task objects from a list + + :param task_list: Union[str, list] = None + Single string or list of string of task names to be loaded + + :return + Dictionary of task objects + """ + if isinstance(task_list, str): + task_list = [task_list] + + all_loaded_tasks = dict( + collections.ChainMap(*map(self._load_individual_task_or_group, task_list)) + ) + return all_loaded_tasks + + def load_config(self, config: Dict): + return self._load_individual_task_or_group(config) + + def _get_task_and_group(self, task_dir: str): + """Creates a dictionary of tasks index with the following metadata, + - `type`, that can be either `task`, `python_task`, or `group`. + `task` refer to regular task configs, `python_task` are special + yaml files that only consists of `task` and `class` parameters. + `group` are group configs. + - `yaml_path`, path to the yaml file. If the entry is a `group` that + was configured through a task config, the yaml_path will be -1 + and all subtasks will be listed in `task` (see below) + - `task`, reserved for entries with `type` as `group`. This will list + all subtasks. When a group config is created (as opposed to task + config having `group` parameter set), this will be set to -1 to + avoid recursive indexing. The whole list of subtasks will be loaded + at evaluation. + + :param task_dir: str + A directory to check for tasks + + :return + Dictionary of task names as key and task metadata + """ + tasks_and_groups = collections.defaultdict() + for root, _, file_list in os.walk(task_dir): + for f in file_list: + if f.endswith(".yaml"): + yaml_path = os.path.join(root, f) + config = utils.load_yaml_config(yaml_path, mode="simple") + if self._config_is_python_task(config): + # This is a python class config + tasks_and_groups[config["task"]] = { + "type": "python_task", + "yaml_path": yaml_path, + } + elif self._config_is_group(config): + # This is a group config + tasks_and_groups[config["group"]] = { + "type": "group", + "task": -1, # This signals that + # we don't need to know + # the task list for indexing + # as it can be loaded + # when called. 
+ "yaml_path": yaml_path, + } + + # # Registered the level 1 tasks from a group config + # for config in config["task"]: + # if isinstance(config, dict) and self._config_is_task(config): + # task = config["task"] + # tasks_and_groups[task] = { + # "type": "task", + # "yaml_path": yaml_path, + # } + + elif self._config_is_task(config): + # This is a task config + task = config["task"] + tasks_and_groups[task] = { + "type": "task", + "yaml_path": yaml_path, + } + + if "group" in config: + groups = config["group"] + if isinstance(config["group"], str): + groups = [groups] + + for group in groups: + if group not in tasks_and_groups: + tasks_and_groups[group] = { + "type": "group", + "task": [task], + "yaml_path": -1, + } + else: + tasks_and_groups[group]["task"].append(task) + else: + self.logger.debug(f"File {f} in {root} could not be loaded") + + return tasks_and_groups + + +def get_task_name_from_config(task_config: Dict[str, str]) -> str: + if "task" in task_config: + return task_config["task"] + if "dataset_name" in task_config: + return "{dataset_path}_{dataset_name}".format(**task_config) + else: + return "{dataset_path}".format(**task_config) + + +def get_task_name_from_object(task_object): + if hasattr(task_object, "config"): + return task_object._config["task"] + + # TODO: scrap this + # this gives a mechanism for non-registered tasks to have a custom name anyways when reporting + return ( + task_object.EVAL_HARNESS_NAME + if hasattr(task_object, "EVAL_HARNESS_NAME") + else type(task_object).__name__ + ) + + +def get_task_dict( + task_name_list: Union[str, List[Union[str, Dict, Task]]], + task_manager: Optional[TaskManager] = None, +): + """Creates a dictionary of task objects from either a name of task, config, or prepared Task object. + + :param task_name_list: List[Union[str, Dict, Task]] + Name of model or LM object, see lm_eval.models.get_model + :param task_manager: TaskManager = None + A TaskManager object that stores indexed tasks. If not set, + task_manager will load one. This should be set by the user + if there are additional paths that want to be included + via `include_path` + + :return + Dictionary of task objects + """ + task_name_from_string_dict = {} + task_name_from_config_dict = {} + task_name_from_object_dict = {} + + if isinstance(task_name_list, str): + task_name_list = [task_name_list] + elif isinstance(task_name_list, list): + if not all([isinstance(task, (str, dict, Task)) for task in task_name_list]): + raise TypeError( + "Expected all list items to be of types 'str', 'dict', or 'Task', but at least one entry did not match." + ) + else: + raise TypeError( + f"Expected a 'str' or 'list' but received {type(task_name_list)}." 
+    )
+
+    string_task_name_list = [task for task in task_name_list if isinstance(task, str)]
+    others_task_name_list = [task for task in task_name_list if not isinstance(task, str)]
+    if len(string_task_name_list) > 0:
+        if task_manager is None:
+            task_manager = TaskManager()
+
+        task_name_from_string_dict = task_manager.load_task_or_group(
+            string_task_name_list
+        )
+
+    for task_element in others_task_name_list:
+        if isinstance(task_element, dict):
+            task_name_from_config_dict = {
+                **task_name_from_config_dict,
+                **task_manager.load_config(config=task_element),
+            }
+
+        elif isinstance(task_element, Task):
+            task_name_from_object_dict = {
+                **task_name_from_object_dict,
+                get_task_name_from_object(task_element): task_element,
+            }
+
+    if not set(task_name_from_string_dict.keys()).isdisjoint(
+        set(task_name_from_object_dict.keys())
+    ):
+        raise ValueError
+
+    return {
+        **task_name_from_string_dict,
+        **task_name_from_config_dict,
+        **task_name_from_object_dict,
+    }
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/anli/README.md b/lm-evaluation/build/lib/lm_eval/tasks/anli/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ba3f99d4826f0604f583772a2b48fe676a6f3e06
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/anli/README.md
@@ -0,0 +1,56 @@
+# ANLI
+
+### Paper
+
+Title: `Adversarial NLI: A New Benchmark for Natural Language Understanding`
+
+Paper Link: https://arxiv.org/abs/1910.14599
+
+Adversarial NLI (ANLI) is a dataset collected via an iterative, adversarial
+human-and-model-in-the-loop procedure. It consists of three rounds that progressively
+increase in difficulty and complexity, and each question-answer includes annotator-
+provided explanations.
+
+Homepage: https://github.com/facebookresearch/anli
+
+### Citation
+
+```
+@inproceedings{nie-etal-2020-adversarial,
+    title = "Adversarial {NLI}: A New Benchmark for Natural Language Understanding",
+    author = "Nie, Yixin and
+      Williams, Adina and
+      Dinan, Emily and
+      Bansal, Mohit and
+      Weston, Jason and
+      Kiela, Douwe",
+    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
+    year = "2020",
+    publisher = "Association for Computational Linguistics",
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* `anli`: Evaluates `anli_r1`, `anli_r2`, and `anli_r3`
+
+#### Tasks
+* `anli_r1`: The data collected adversarially in the first round.
+* `anli_r2`: The data collected adversarially in the second round, after training on the previous round's data.
+* `anli_r3`: The data collected adversarially in the third round, after training on the previous multiple rounds of data.
+
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
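+
+As a minimal sketch (not a prescribed workflow), these tasks can also be loaded programmatically through the `TaskManager` and `get_task_dict` helpers defined in `lm_eval/tasks/__init__.py`:
+
+```python
+from lm_eval.tasks import TaskManager, get_task_dict
+
+task_manager = TaskManager(verbosity="INFO")   # indexes the bundled task YAMLs
+print(task_manager.match_tasks(["anli_r*"]))   # e.g. ['anli_r1', 'anli_r2', 'anli_r3']
+
+# Expanding the group name loads each round as a ConfigurableTask
+# (note: this downloads the ANLI dataset on first use).
+task_dict = get_task_dict(["anli"], task_manager)
+print(sorted(task_dict.keys()))                # ['anli_r1', 'anli_r2', 'anli_r3']
+```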
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/anli/anli_r1.yaml b/lm-evaluation/build/lib/lm_eval/tasks/anli/anli_r1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bcf7674ee1bfc91f35e1566a6ddc5dc946c0ba72 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/anli/anli_r1.yaml @@ -0,0 +1,26 @@ +group: + - anli +task: anli_r1 +dataset_path: anli +dataset_name: null +output_type: multiple_choice +training_split: train_r1 +validation_split: dev_r1 +test_split: test_r1 +doc_to_text: "{{premise}}\nQuestion: {{hypothesis}} True, False, or Neither?\nAnswer:" +# True = entailment +# False = contradiction +# Neither = neutral +doc_to_target: "{{['True', 'Neither', 'False'][label]}}" +doc_to_choice: + - "True" + - "Neither" + - "False" +should_decontaminate: true +doc_to_decontamination_query: premise +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/anli/anli_r2.yaml b/lm-evaluation/build/lib/lm_eval/tasks/anli/anli_r2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..85f28d67cf230fa36cd38dd8d6a345f6e679c53e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/anli/anli_r2.yaml @@ -0,0 +1,5 @@ +include: anli_r1.yaml +task: anli_r2 +training_split: train_r2 +validation_split: dev_r2 +test_split: test_r2 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/anli/anli_r3.yaml b/lm-evaluation/build/lib/lm_eval/tasks/anli/anli_r3.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6b9f98a867f7d03b90e84a425dc8b044b4cc96fb --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/anli/anli_r3.yaml @@ -0,0 +1,5 @@ +include: anli_r1.yaml +task: anli_r3 +training_split: train_r3 +validation_split: dev_r3 +test_split: test_r3 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/README.md b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e7d7f89efbbd3af29e5e1c28b1af1adb93073569 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/README.md @@ -0,0 +1,60 @@ +# Arithmetic + +### Paper + +Title: `Language Models are Few-Shot Learners` +Abstract: https://arxiv.org/abs/2005.14165 + +A small battery of 10 tests that involve asking language models a simple arithmetic +problem in natural language. + +Homepage: https://github.com/openai/gpt-3/tree/master/data + + +### Citation + +``` +@inproceedings{NEURIPS2020_1457c0d6, + author = {Brown, Tom and Mann, Benjamin and Ryder, Nick and Subbiah, Melanie and Kaplan, Jared D and Dhariwal, Prafulla and Neelakantan, Arvind and Shyam, Pranav and Sastry, Girish and Askell, Amanda and Agarwal, Sandhini and Herbert-Voss, Ariel and Krueger, Gretchen and Henighan, Tom and Child, Rewon and Ramesh, Aditya and Ziegler, Daniel and Wu, Jeffrey and Winter, Clemens and Hesse, Chris and Chen, Mark and Sigler, Eric and Litwin, Mateusz and Gray, Scott and Chess, Benjamin and Clark, Jack and Berner, Christopher and McCandlish, Sam and Radford, Alec and Sutskever, Ilya and Amodei, Dario}, + booktitle = {Advances in Neural Information Processing Systems}, + editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. 
Lin}, + pages = {1877--1901}, + publisher = {Curran Associates, Inc.}, + title = {Language Models are Few-Shot Learners}, + url = {https://proceedings.neurips.cc/paper/2020/file/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf}, + volume = {33}, + year = {2020} +} +``` + +### Groups and Tasks + +#### Groups + +* `arithmetic`: Evaluates `1dc` to `5ds` + +#### Tasks + +* `arithmetic_1dc` +* `arithmetic_2da` +* `arithmetic_2dm` +* `arithmetic_2ds` +* `arithmetic_3da` +* `arithmetic_3ds` +* `arithmetic_4da` +* `arithmetic_4ds` +* `arithmetic_5da` +* `arithmetic_5ds` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_1dc.yaml b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_1dc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3e8d414a60c1f9df7c635fafd34b7a2f39a36865 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_1dc.yaml @@ -0,0 +1,18 @@ +group: + - arithmetic +task: arithmetic_1dc +dataset_path: EleutherAI/arithmetic +dataset_name: arithmetic_1dc +output_type: loglikelihood +validation_split: validation +test_split: null +doc_to_text: "{{context}}" +doc_to_target: "{{completion}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_2da.yaml b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_2da.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a186d76e8971072947dd6e9322e701ecc8815e89 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_2da.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_2da +dataset_name: arithmetic_2da +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_2dm.yaml b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_2dm.yaml new file mode 100644 index 0000000000000000000000000000000000000000..471bd4b4449f280412d9ee69566d4f80fd623671 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_2dm.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_2dm +dataset_name: arithmetic_2dm +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_2ds.yaml b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_2ds.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f8e762486b818ee8b2962c94f46edaefb36da6b5 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_2ds.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_2ds +dataset_name: arithmetic_2ds +dataset_kwargs: + trust_remote_code: true diff --git 
a/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_3da.yaml b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_3da.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a4870d04f0c47ea61a75504ce051bd929ee1840e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_3da.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_3da +dataset_name: arithmetic_3da +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_3ds.yaml b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_3ds.yaml new file mode 100644 index 0000000000000000000000000000000000000000..37f9ff0d2536d6c55c3e0f1676fe8218395d7b6c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_3ds.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_3ds +dataset_name: arithmetic_3ds +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_4da.yaml b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_4da.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4c04c6249fc520010317fe2503813acf86780844 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_4da.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_4da +dataset_name: arithmetic_4da +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_4ds.yaml b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_4ds.yaml new file mode 100644 index 0000000000000000000000000000000000000000..282b3d1e51e886b3509a68ffb921238eb8e49cb0 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_4ds.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_4ds +dataset_name: arithmetic_4ds +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_5da.yaml b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_5da.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5365cfbeb94d8fea5d782500a8f88ecfc19dafdb --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_5da.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_5da +dataset_name: arithmetic_5da +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_5ds.yaml b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_5ds.yaml new file mode 100644 index 0000000000000000000000000000000000000000..51d95da0074dd32b7c99e0d80e2a54765279c5bc --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/arithmetic/arithmetic_5ds.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_5ds +dataset_name: arithmetic_5ds +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/README.md b/lm-evaluation/build/lib/lm_eval/tasks/bbh/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9ef2a4abdad4ac293d8a74865397ae7ed08a16ca --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/README.md @@ -0,0 +1,49 @@ +# BigBenchHard + +## Paper +Title: `Challenging BIG-Bench Tasks and Whether Chain-of-Thought Can Solve Them` +Abstract: https://arxiv.org/abs/2210.09261 + +A suite of 23 challenging BIG-Bench tasks which we call BIG-Bench Hard (BBH). 
+These are the tasks for which prior language model evaluations did not outperform
+the average human rater.
+
+Homepage: https://github.com/suzgunmirac/BIG-Bench-Hard
+
+
+## Citation
+```
+@article{suzgun2022challenging,
+  title={Challenging BIG-Bench Tasks and Whether Chain-of-Thought Can Solve Them},
+  author={Suzgun, Mirac and Scales, Nathan and Sch{\"a}rli, Nathanael and Gehrmann, Sebastian and Tay, Yi and Chung, Hyung Won and Chowdhery, Aakanksha and Le, Quoc V and Chi, Ed H and Zhou, Denny and Wei, Jason},
+  journal={arXiv preprint arXiv:2210.09261},
+  year={2022}
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+- `bbh_zeroshot`
+- `bbh_fewshot`
+- `bbh_cot_fewshot`
+- `bbh_cot_zeroshot`
+
+
+#### Tasks
+
+- ...
+
+### Checklist
+
+- [x] Is in Eval-harness v1.0?
+- [ ] Has been checked for regression from v1.0?
+- [ ] Has been checked for equivalence with original paper methodology?
+- [ ] "Main" checked variant clearly denoted?
+
+### Variant Wishlist
+
+- [ ] Variant with Calculator (see https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py for example implementation)
+- [ ] Using Verifiers
+- [ ] Majority voting "without CoT"
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/_generate_configs.py b/lm-evaluation/build/lib/lm_eval/tasks/bbh/_generate_configs.py
new file mode 100644
index 0000000000000000000000000000000000000000..febee5fcd43aae9f328545a6ee3a3699d541a073
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/_generate_configs.py
@@ -0,0 +1,80 @@
+"""
+Take in a YAML, and output all other splits with this YAML
+"""
+import argparse
+import os
+import re
+
+import datasets
+import requests
+import yaml
+from tqdm import tqdm
+
+from lm_eval import utils
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--base_yaml_path", required=True)
+    parser.add_argument("--save_prefix_path", default="zeroshot")
+    parser.add_argument("--cot", default=False)
+    parser.add_argument("--fewshot", default=False)
+    parser.add_argument("--task_prefix", default="")
+    return parser.parse_args()
+
+
+if __name__ == "__main__":
+    args = parse_args()
+
+    # get filename of base_yaml so we can `"include": ` it in our other YAMLs.
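+    # For each BBH subset exposed by the lukaemon/bbh dataset, the loop below fetches
+    # the matching CoT prompt file from the BIG-Bench-Hard repository, splits off the
+    # task description, optionally builds a few-shot (and/or CoT) prefix, and writes a
+    # per-task YAML that `include`s this base YAML.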
+ base_yaml_name = os.path.split(args.base_yaml_path)[-1] + with open(args.base_yaml_path, encoding="utf-8") as f: + base_yaml = yaml.full_load(f) + + base_doc_to_text = "Q: {{input}}\nA:" + answer_regex = re.compile("(?<=answer is )(.*)(?=.)") + + dataset_path = "lukaemon/bbh" + for task in tqdm(datasets.get_dataset_infos(dataset_path).keys()): + resp = requests.get( + f"https://raw.githubusercontent.com/suzgunmirac/BIG-Bench-Hard/main/cot-prompts/{task}.txt" + ).content.decode("utf-8") + prompt = resp.split("\n-----\n")[-1] + description, *few_shot = prompt.split("\n\n") + + prefix_doc_to_text = "" + if args.fewshot: + if args.cot: + prefix_doc_to_text = "\n\n".join(few_shot) + "\n\n" + else: + for shot in few_shot: + try: + answer = answer_regex.search(shot)[0] + except Exception: + print("task", task) + print(shot) + example = shot.split("Let's think step by step.")[0] + prefix_doc_to_text += f"{example}{answer}\n\n" + + doc_to_text = prefix_doc_to_text + base_doc_to_text + if args.cot: + doc_to_text = doc_to_text + " Let's think step by step.\n" + + yaml_dict = { + "include": base_yaml_name, + "task": f"bbh_{args.task_prefix}_{task}", + "dataset_name": task, + "description": description + "\n\n", + "doc_to_text": doc_to_text, + } + + file_save_path = args.save_prefix_path + f"/{task}.yaml" + utils.eval_logger.info(f"Saving yaml for subset {task} to {file_save_path}") + with open(file_save_path, "w", encoding="utf-8") as yaml_file: + yaml.dump( + yaml_dict, + yaml_file, + width=float("inf"), + allow_unicode=True, + default_style='"', + ) diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/boolean_expressions.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/boolean_expressions.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0d28c969b6bdd3445d8b246659f4f2bf9bb3e323 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/boolean_expressions.yaml @@ -0,0 +1,18 @@ +"dataset_name": "boolean_expressions" +"description": "Evaluate the result of a random Boolean expression.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." +"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_boolean_expressions" + +filter_list: + - name: "flexible-extract" + filter: + - function: "regex" + group_select: -1 + regex_pattern: "\\b(True|False)\\b" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/causal_judgement.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/causal_judgement.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2bf47baad136dc6d44eaec82d6fdf1520c3a114b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/causal_judgement.yaml @@ -0,0 +1,18 @@ +"dataset_name": "causal_judgement" +"description": "Answer questions about causal attribution.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." 
+"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_causal_judgement" + +filter_list: + - name: "flexible-extract" + filter: + - function: "regex" + group_select: -1 + regex_pattern: "\\b(Yes|No|yes|no)\\b" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/date_understanding.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/date_understanding.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c894b9c8ee151ef6c83a043737c5fb43de32ac03 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/date_understanding.yaml @@ -0,0 +1,20 @@ +"dataset_name": "date_understanding" +"description": "Infer the date from context.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." +"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_date_understanding" + +filter_list: + - name: "flexible-extract" + filter: + - function: !function utils.MultiChoiceRegexFilter + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/formal_fallacies.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/formal_fallacies.yaml new file mode 100644 index 0000000000000000000000000000000000000000..02c7eebe8ac14e14781381235908eabcf446842a --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/formal_fallacies.yaml @@ -0,0 +1,18 @@ +"dataset_name": "formal_fallacies" +"description": "Distinguish deductively valid arguments from formal fallacies.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." +"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_formal_fallacies" + +filter_list: + - name: "flexible-extract" + filter: + - function: "regex" + group_select: -1 + regex_pattern: "\\b(valid|invalid)\\b" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/geometric_shapes.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/geometric_shapes.yaml new file mode 100644 index 0000000000000000000000000000000000000000..833b93d7a31ced1132f19dd14b47bdc795b02325 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/geometric_shapes.yaml @@ -0,0 +1,20 @@ +"dataset_name": "geometric_shapes" +"description": "Name geometric shapes from their SVG paths.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." 
+"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_geometric_shapes" + +filter_list: + - name: "flexible-extract" + filter: + - function: !function utils.MultiChoiceRegexFilter + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/hyperbaton.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/hyperbaton.yaml new file mode 100644 index 0000000000000000000000000000000000000000..152a5d1dca434012aea5d3501225f850987b2465 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/hyperbaton.yaml @@ -0,0 +1,20 @@ +"dataset_name": "hyperbaton" +"description": "Order adjectives correctly in English sentences.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." +"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_hyperbaton" + +filter_list: + - name: "flexible-extract" + filter: + - function: !function utils.MultiChoiceRegexFilter + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_five_objects.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_five_objects.yaml new file mode 100644 index 0000000000000000000000000000000000000000..946030a0062d9697b4c6e72f236b21971c5e28b4 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_five_objects.yaml @@ -0,0 +1,19 @@ +"dataset_name": "logical_deduction_five_objects" +"description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." +"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_logical_deduction_five_objects" +filter_list: + - name: "flexible-extract" + filter: + - function: !function utils.MultiChoiceRegexFilter + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_seven_objects.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_seven_objects.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f92f4bc5aaf86db30f4decaeee2f374b76107028 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_seven_objects.yaml @@ -0,0 +1,19 @@ +"dataset_name": "logical_deduction_seven_objects" +"description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." 
+"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_logical_deduction_seven_objects" +filter_list: + - name: "flexible-extract" + filter: + - function: !function utils.MultiChoiceRegexFilter + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_three_objects.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_three_objects.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d1451828848c37156e53177765ce6941ff67b6eb --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_three_objects.yaml @@ -0,0 +1,19 @@ +"dataset_name": "logical_deduction_three_objects" +"description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." +"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_logical_deduction_three_objects" +filter_list: + - name: "flexible-extract" + filter: + - function: !function utils.MultiChoiceRegexFilter + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/movie_recommendation.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/movie_recommendation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c1b68b8b881ca929d284094fa129bca064bc08e4 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/movie_recommendation.yaml @@ -0,0 +1,19 @@ +"dataset_name": "movie_recommendation" +"description": "Recommend movies similar to the given list of movies.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." +"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_movie_recommendation" +filter_list: + - name: "flexible-extract" + filter: + - function: !function utils.MultiChoiceRegexFilter + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/multistep_arithmetic_two.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/multistep_arithmetic_two.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9b8f6d7228b76d74d7eff09ead513bd1eb81d4a1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/multistep_arithmetic_two.yaml @@ -0,0 +1,18 @@ +"dataset_name": "multistep_arithmetic_two" +"description": "Solve multi-step arithmetic problems.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." 
+"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_multistep_arithmetic_two" + +filter_list: + - name: "flexible-extract" + filter: + - function: !function utils.NumberParseRegexFilter + group_select: -1 + regex_pattern: "([-0-9]+)" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/navigate.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/navigate.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2f1fee3159ded8988e798ab8f19f464de7ae0a69 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/navigate.yaml @@ -0,0 +1,17 @@ +"dataset_name": "navigate" +"description": "Given a series of navigation instructions, determine whether one would end up back at the starting point.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." +"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_navigate" +filter_list: + - name: "flexible-extract" + filter: + - function: "regex" + group_select: -1 + regex_pattern: "\\b(Yes|No|yes|no)\\b" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/penguins_in_a_table.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/penguins_in_a_table.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1268962e3109170d8c4fb1c52240b7221c8853d8 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/penguins_in_a_table.yaml @@ -0,0 +1,19 @@ +"dataset_name": "penguins_in_a_table" +"description": "Answer questions about a table of penguins and their attributes.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." +"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_penguins_in_a_table" +filter_list: + - name: "flexible-extract" + filter: + - function: !function utils.MultiChoiceRegexFilter + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/reasoning_about_colored_objects.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/reasoning_about_colored_objects.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9f9b3e1c92a47603c825d54242903a45d13ebcd9 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/reasoning_about_colored_objects.yaml @@ -0,0 +1,19 @@ +"dataset_name": "reasoning_about_colored_objects" +"description": "Answer extremely simple questions about the colors of objects on a surface.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." 
+"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_reasoning_about_colored_objects" +filter_list: + - name: "flexible-extract" + filter: + - function: !function utils.MultiChoiceRegexFilter + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/salient_translation_error_detection.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/salient_translation_error_detection.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d7d72eadc3bbd2c026c9a62dc237f90c725dacf7 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/salient_translation_error_detection.yaml @@ -0,0 +1,19 @@ +"dataset_name": "salient_translation_error_detection" +"description": "Detect the type of error in an English translation of a German source sentence.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." +"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_salient_translation_error_detection" +filter_list: + - name: "flexible-extract" + filter: + - function: !function utils.MultiChoiceRegexFilter + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/sports_understanding.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/sports_understanding.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1046bfe81928a4f09bddadd03a9062704c5dc357 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/sports_understanding.yaml @@ -0,0 +1,21 @@ +"dataset_name": "sports_understanding" +"description": "Determine whether an artificially constructed sentence relating to sports is plausible or not.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." 
+"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_sports_understanding" + +filter_list: + - name: "flexible-extract" + filter: + - function: !function utils.MapRegexFilter + group_select: -1 + ignore_case: true + regex_pattern_to_value: + \b(no|not plausible)\b: "no" + \b(yes|plausible)\b: "yes" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/temporal_sequences.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/temporal_sequences.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c7b949ada5ad2a8293869ed3c29fff9b419e0870 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/temporal_sequences.yaml @@ -0,0 +1,19 @@ +"dataset_name": "temporal_sequences" +"description": "Task description: Answer questions about which times certain events could have occurred.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." +"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_temporal_sequences" +filter_list: + - name: "flexible-extract" + filter: + - function: !function utils.MultiChoiceRegexFilter + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_five_objects.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_five_objects.yaml new file mode 100644 index 0000000000000000000000000000000000000000..589253017ff284a00cda4261d085557d3b97068f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_five_objects.yaml @@ -0,0 +1,19 @@ +"dataset_name": "tracking_shuffled_objects_five_objects" +"description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." 
+"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_tracking_shuffled_objects_five_objects" +filter_list: + - name: "flexible-extract" + filter: + - function: !function utils.MultiChoiceRegexFilter + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_seven_objects.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_seven_objects.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4635d7cabaa250aa1c255c8d9d80cf8f8c87e9b6 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_seven_objects.yaml @@ -0,0 +1,19 @@ +"dataset_name": "tracking_shuffled_objects_seven_objects" +"description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." +"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_tracking_shuffled_objects_seven_objects" +filter_list: + - name: "flexible-extract" + filter: + - function: !function utils.MultiChoiceRegexFilter + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_three_objects.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_three_objects.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1edf0c0537c71510cf781ae1acbab9829eeed883 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_three_objects.yaml @@ -0,0 +1,19 @@ +"dataset_name": "tracking_shuffled_objects_three_objects" +"description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." 
+"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_tracking_shuffled_objects_three_objects" +filter_list: + - name: "flexible-extract" + filter: + - function: !function utils.MultiChoiceRegexFilter + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/web_of_lies.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/web_of_lies.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a3227304a29461497f6c9acd08965dda481b95f5 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/web_of_lies.yaml @@ -0,0 +1,20 @@ +"dataset_name": "web_of_lies" +"description": "Evaluate a random boolean function expressed as a word problem.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." +"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_web_of_lies" +filter_list: + - name: "flexible-extract" + filter: + - function: !function utils.MapRegexFilter + group_select: -1 + ignore_case: true + regex_pattern_to_value: + \b(no|does not tell the truth|is not telling the truth)\b: "no" + \b(yes|tells the truth|is telling the truth)\b: "yes" + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/word_sorting.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/word_sorting.yaml new file mode 100644 index 0000000000000000000000000000000000000000..258add09a083b508197d5ea614f388d6cca53a40 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bbh/cot_zeroshot/word_sorting.yaml @@ -0,0 +1,15 @@ +"dataset_name": "word_sorting" +"description": "Sort a list of words.\n\n" +"doc_to_text": "Q: {{input}}\nA: Let's think step by step." +"include": "_cot_zeroshot_template_yaml" +"task": "bbh_cot_zeroshot_word_sorting" +filter_list: + - name: "flexible-extract" + filter: + - function: !function utils.WordSortFilter + - function: "take_first" + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))" + - function: "take_first" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/coqa/README.md b/lm-evaluation/build/lib/lm_eval/tasks/coqa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..77347e4fd8430ddc1fd7411be84a770d64f9096f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/coqa/README.md @@ -0,0 +1,43 @@ +# CoQA + +### Paper + +Title: `CoQA: A Conversational Question Answering Challenge` + +Abstract: https://arxiv.org/pdf/1808.07042.pdf + +CoQA is a large-scale dataset for building Conversational Question Answering +systems. The goal of the CoQA challenge is to measure the ability of machines to +understand a text passage and answer a series of interconnected questions that +appear in a conversation. 
+ +Homepage: https://stanfordnlp.github.io/coqa/ + +### Citation + +``` +BibTeX-formatted citation goes here +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet + +#### Tasks + +* `coqa` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/build/lib/lm_eval/tasks/coqa/default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/coqa/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..de398c242d04dfd823c32c5fbbb3c3796355d3f6 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/coqa/default.yaml @@ -0,0 +1,24 @@ +task: coqa +dataset_path: EleutherAI/coqa +output_type: generate_until +training_split: train +validation_split: validation +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +process_results: !function utils.process_results +should_decontaminate: true +doc_to_decontamination_query: "{{story}} {{question.input_text|join('\n')}}" +generation_kwargs: + until: + - "\nQ:" +metric_list: + - metric: em + aggregation: mean + higher_is_better: true + - metric: f1 + aggregation: mean + higher_is_better: true +metadata: + version: 3.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/coqa/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/coqa/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..29911cfec5cd345b41c631064a7e281b9d15000e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/coqa/utils.py @@ -0,0 +1,77 @@ +from itertools import zip_longest + +import transformers.data.metrics.squad_metrics as squad_metrics + + +def doc_to_text(doc): + # Given a passage p, the conversation history {q1, a1, . . . qi−1, ai−1} + # and a question qi, the task is to predict the answer ai + doc_text = doc["story"] + "\n\n" + for q, a in zip_longest( + doc["questions"]["input_text"], doc["answers"]["input_text"][:-1] + ): # omit target answer ai + question = f"Q: {q}\n\n" + answer = f"A: {a}\n\n" if a is not None else "A:" + doc_text += question + answer + return doc_text + + +def doc_to_target(doc): + turn_id = len(doc["questions"]["input_text"]) + # Returns unique answers and valid alternatives (Some questions in CoQA have multiple valid answers). 
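+    # The gold answer for the current (final) turn comes from answers["input_text"];
+    # any alternative annotations under additional_answers are appended as well,
+    # skipping case-insensitive duplicates.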
+ answers = [] + answer_forturn = doc["answers"]["input_text"][turn_id - 1] + answers.append(answer_forturn) + + additional_answers = doc.get("additional_answers") + if additional_answers: + for key in additional_answers: + additional_answer_for_turn = additional_answers[key]["input_text"][ + turn_id - 1 + ] + if additional_answer_for_turn.lower() not in map(str.lower, answers): + answers.append(additional_answer_for_turn) + return answers + + +def em(gold_list, pred): + # tests for exact match and on the normalised answer (compute_exact) + em_sum = 0.0 + if len(gold_list) > 1: + for i in range(len(gold_list)): + gold_answers = gold_list[0:i] + gold_list[i + 1 :] + # predictions compared against (n) golds and take maximum + em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_answers) + else: + em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_list) + + return em_sum / max(1, len(gold_list)) + + +def compute_scores(gold_list, pred): + # tests for exact match and on the normalised answer (compute_exact) + # test for overlap (compute_f1) + f1_sum = 0.0 + em_sum = 0.0 + if len(gold_list) > 1: + for i in range(len(gold_list)): + gold_answers = gold_list[0:i] + gold_list[i + 1 :] + # predictions compared against (n) golds and take maximum + em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_answers) + f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_answers) + else: + em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_list) + f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_list) + + return { + "em": em_sum / max(1, len(gold_list)), + "f1": f1_sum / max(1, len(gold_list)), + } + + +def process_results(doc, results): + gold_list = doc_to_target(doc) + pred = results[0].strip().split("\n")[0] + + scores = compute_scores(gold_list, pred) + return scores diff --git a/lm-evaluation/build/lib/lm_eval/tasks/eq_bench/README.md b/lm-evaluation/build/lib/lm_eval/tasks/eq_bench/README.md new file mode 100644 index 0000000000000000000000000000000000000000..472890bdc832705e55f7a28209a74ea2af6b9865 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/eq_bench/README.md @@ -0,0 +1,55 @@ +# EQ-Bench + +Title: `EQ-Bench: An Emotional Intelligence Benchmark for Large Language Models` + +Abstract: https://arxiv.org/abs/2312.06281 + +EQ-Bench is a benchmark for language models designed to assess emotional intelligence. + +Why emotional intelligence? One reason is that it represents a subset of abilities that are important for the user experience, and which isn't explicitly tested by other benchmarks. Another reason is that it's not trivial to improve scores by fine tuning for the benchmark, which makes it harder to "game" the leaderboard. + +EQ-Bench is a little different from traditional psychometric tests. It uses a specific question format, in which the subject has to read a dialogue then rate the intensity of possible emotional responses of one of the characters. Every question is interpretative and assesses the ability to predict the magnitude of the 4 presented emotions. The test is graded without the need for a judge (so there is no length bias). It's cheap to run (only 171 questions), and produces results that correlate strongly with human preference (Arena ELO) and multi-domain benchmarks like MMLU. 
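+
+For reference, the scoring code in this task (`utils.calculate_score_fullscale` below) expects the model's completion to contain four `emotion: score` pairs; the emotion names here are placeholders, since each question supplies its own four emotions:
+
+```
+Surprise: 0
+Confusion: 2
+Hurt: 8
+Relief: 1
+```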
+ +Homepage: https://eqbench.com/ + + +NOTE: There are some key differences between the lm-evaluation-harness version and the implementation described in the EQ-Bench paper (These have been OK'd by the author): + +- The lm-eval version uses the EQ-Bench v2 test set (171 questions) and score calculation. It does not incorporate the revision part of the prompt, as per v2.1 (https://github.com/EQ-bench/EQ-Bench) +- No retries in lm-eval version (EQ-Bench pipeline retries with successively higher temps if it encounters unparseable answers) +- In the original implementation, unparseable answers are excluded from the final score, and 83% of answers have to be parseable or a fail is returned. The lm-eval version instead assigns 0 to unparsable answers and has no fail criteria. So for lower performing models, there may be differences with the EQ-Bench leaderboard. + + +### Citation + +```bibtex +@misc{paech2023eqbench, + title={EQ-Bench: An Emotional Intelligence Benchmark for Large Language Models}, + author={Samuel J. Paech}, + year={2023}, + eprint={2312.06281}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet + +#### Tasks + +* `eq_bench` + +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/build/lib/lm_eval/tasks/eq_bench/default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/eq_bench/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..16b1245b22c91e74a4ab398945a27ac31c82c5a8 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/eq_bench/default.yaml @@ -0,0 +1,20 @@ +task: eq_bench +dataset_path: pbevan11/EQ-Bench +output_type: generate_until +validation_split: validation +doc_to_text: prompt +doc_to_target: reference_answer_fullscale +process_results: !function utils.calculate_score_fullscale +generation_kwargs: + do_sample: false + temperature: 0.0 + max_gen_toks: 80 +metric_list: + - metric: eqbench + aggregation: mean + higher_is_better: true + - metric: percent_parseable + aggregation: mean + higher_is_better: true +metadata: + version: 2.1 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/eq_bench/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/eq_bench/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..326a0dc485f22c01053c10e65bc9bf05e1aeb590 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/eq_bench/utils.py @@ -0,0 +1,54 @@ +import math +import re + + +def calculate_score_fullscale(docs, results): + reference = eval(docs["reference_answer_fullscale"]) + user = dict(re.findall(r"(\w+):\s+(\d+)", results[0])) + # First check that the emotions specified in the answer match those in the reference + if len(user.items()) != 4: + # print('! 
Error: 4 emotions were not returned') + # print(user) + return {"eqbench": 0, "percent_parseable": 0} + emotions_dict = {} + for emotion, user_emotion_score in user.items(): + for i in range(1, 5): + if emotion == reference[f"emotion{i}"]: + emotions_dict[emotion] = True + if len(emotions_dict) != 4: + print("! Error: emotions did not match reference") + print(user) + return {"eqbench": 0, "percent_parseable": 0} + + difference_tally = ( + 0 # Tally of differerence from reference answers for this question + ) + + # Iterate over each emotion in the user's answers. + for emotion, user_emotion_score in user.items(): + # If this emotion is in the reference, calculate the difference between the user's score and the reference score. + for i in range(1, 5): + if emotion == reference[f"emotion{i}"]: + d = abs( + float(user_emotion_score) - float(reference[f"emotion{i}_score"]) + ) + # this will be a value between 0 and 10 + if d == 0: + scaled_difference = 0 + elif d <= 5: + # S-shaped scaling function + # https://www.desmos.com/calculator + # 6.5\cdot\ \frac{1}{\left(1\ +\ e^{\left(-1.2\cdot\left(x-4\right)\right)}\right)} + scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4)))) + + else: + scaled_difference = d + difference_tally += scaled_difference + + # Inverting the difference tally so that the closer the answer is to reference, the higher the score. + # The adjustment constant is chosen such that answering randomly produces a score of zero. + adjust_const = 0.7477 + final_score = 10 - (difference_tally * adjust_const) + final_score_percent = final_score * 10 + + return {"eqbench": final_score_percent, "percent_parseable": 100} diff --git a/lm-evaluation/build/lib/lm_eval/tasks/fld/README.md b/lm-evaluation/build/lib/lm_eval/tasks/fld/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1c7d88e3df69a6690c9da2c897cdf0b3d7311e05 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/fld/README.md @@ -0,0 +1,64 @@ +# FLD + +### Paper + +Title: Learning Deductive Reasoning from Synthetic Corpus based on Formal Logic + +Abstract: https://arxiv.org/abs/2308.07336 + +**FLD** (**F**ormal **L**ogic **D**eduction) is a deductive reasoning benchmark. +Given a set of facts and a hypothesis, an LLM is required to generate (i) proof steps to (dis-)prove the hypothesis, and (ii) an answer ("proved", "disproved" or unknown"). + +Unique features of FLD are: +* It assesses the model's logical reasoning ability *isolated from knowledge*, as the facts are randomly constructed so that referring to existing knowledge never helps solve the task. +* It assesses diverse reasoning patterns (i.e., deduction rules), as it is based on formal logic theory. +* As a result, it is highly challenging. Indeed, even GPT-4 can solve only about half of the problems. 
+ +Homepage: https://github.com/hitachi-nlp/FLD + + +### Citation + +``` +@InProceedings{pmlr-v202-morishita23a, + title = {Learning Deductive Reasoning from Synthetic Corpus based on Formal Logic}, + author = {Morishita, Terufumi and Morio, Gaku and Yamaguchi, Atsuki and Sogawa, Yasuhiro}, + booktitle = {Proceedings of the 40th International Conference on Machine Learning}, + pages = {25254--25274}, + year = {2023}, + editor = {Krause, Andreas and Brunskill, Emma and Cho, Kyunghyun and Engelhardt, Barbara and Sabato, Sivan and Scarlett, Jonathan}, + volume = {202}, + series = {Proceedings of Machine Learning Research}, + month = {23--29 Jul}, + publisher = {PMLR}, + pdf = {https://proceedings.mlr.press/v202/morishita23a/morishita23a.pdf}, + url = {https://proceedings.mlr.press/v202/morishita23a.html}, +} +``` + +### Groups and Tasks + +#### Groups + +* `fld` + +#### Tasks + +This release is the simplified version of FLD where a model is required to predict only an answer. +This setting is described by "answer accuracy" in the original paper. + +* `fld_default` is a basic task based on [FLD.v2](https://huggingface.co/datasets/hitachi-nlp/FLD.v2/viewer/star) +* `fld_star`: is a more challenging version based on [FLD.v2-star](https://huggingface.co/datasets/hitachi-nlp/FLD.v2/viewer/star) + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/build/lib/lm_eval/tasks/fld/fld_default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/fld/fld_default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..233a3564a3ffb6d207dd397103a27bd37c43dc22 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/fld/fld_default.yaml @@ -0,0 +1,21 @@ +group: + - fld +task: fld_default +dataset_path: hitachi-nlp/FLD.v2 +dataset_name: default +training_split: train +validation_split: validation +test_split: test +doc_to_text: "Based on the provided facts ($context$), either prove or disprove the hypothesis or state that it is unknown. 
{{prompt_serial}}" +doc_to_target: world_assump_label +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true +filter_list: + - name: remove_whitespace + filter: + - function: remove_whitespace + - function: take_first +metadata: + version: 2.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/fld/fld_star.yaml b/lm-evaluation/build/lib/lm_eval/tasks/fld/fld_star.yaml new file mode 100644 index 0000000000000000000000000000000000000000..750e808c780001e4659c9def75400f8a2460045e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/fld/fld_star.yaml @@ -0,0 +1,3 @@ +include: fld_default.yaml +task: fld_star +dataset_name: star diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kobest/README.md b/lm-evaluation/build/lib/lm_eval/tasks/kobest/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5a160da77140f37244dde849f42ab5b3f223a0a4 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kobest/README.md @@ -0,0 +1,37 @@ +# KoBEST + +### Paper +Title: `KOBEST: Korean Balanced Evaluation of Significant Tasks` + +Abstract: https://arxiv.org/abs/2204.04541 + +A well-formulated benchmark plays a critical role in spurring advancements in the natural language processing (NLP) field, as it allows objective and precise evaluation of diverse models. As modern language models (LMs) have become more elaborate and sophisticated, more difficult benchmarks that require linguistic knowledge and reasoning have been proposed. However, most of these benchmarks only support English, and great effort is necessary to construct benchmarks for other low resource languages. To this end, we propose a new benchmark named Korean balanced evaluation of significant tasks (KoBEST), which consists of five Korean-language downstream tasks. Professional Korean linguists designed the tasks that require advanced Korean linguistic knowledge. Moreover, our data is purely annotated by humans and thoroughly reviewed to guarantee high data quality. We also provide baseline models and human performance results. Our dataset is available on the Huggingface.
+ + +Homepage: https://huggingface.co/datasets/skt/kobest_v1 + +### Groups and Tasks + +#### Groups + +- `kobest` + +#### Tasks + +- `kobest_boolq` +- `kobest_copa` +- `kobest_hellaswag` +- `kobest_sentineg` +- `kobest_wic` + + +### Citation + +@misc{ + author={Dohyeong Kim, Myeongjun Jang, Deuk Sin Kwon, Eric Davis}, + title={KOBEST: Korean Balanced Evaluation of Significant Tasks}, + DOI={https://doi.org/10.48550/arXiv.2204.04541}, + publisher={arXiv}, + year={2022}, + month={Apr} +} diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kobest/kobest_boolq.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kobest/kobest_boolq.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e9932d56a9300f31bd96a1cd14ee2df091005b21 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kobest/kobest_boolq.yaml @@ -0,0 +1,23 @@ +group: + - kobest +task: kobest_boolq +dataset_path: skt/kobest_v1 +dataset_name: boolq +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: "{{paragraph}} 질문: {{question}} 답변: " +doc_to_target: "{{label}}" +doc_to_choice: ["아니오", "예"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: True + - metric: f1 + aggregation: !function utils.macro_f1_score + average: macro + hf_evaluate: true + higher_is_better: True +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kobest/kobest_copa.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kobest/kobest_copa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1f3b34e61fad86a037010dd892fd7b894346f456 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kobest/kobest_copa.yaml @@ -0,0 +1,23 @@ +group: + - kobest +task: kobest_copa +dataset_path: skt/kobest_v1 +dataset_name: copa +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: !function utils.copa_doc_to_text +doc_to_target: !function utils.copa_doc_to_target +doc_to_choice: !function utils.copa_doc_to_choice +metric_list: + - metric: acc + aggregation: mean + higher_is_better: True + - metric: f1 + aggregation: !function utils.macro_f1_score + average: macro + hf_evaluate: true + higher_is_better: True +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kobest/kobest_hellaswag.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kobest/kobest_hellaswag.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d83266a813ecd5a9ffd1989d45ac4c49b5779558 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kobest/kobest_hellaswag.yaml @@ -0,0 +1,27 @@ +group: + - kobest +task: kobest_hellaswag +dataset_path: skt/kobest_v1 +dataset_name: hellaswag +training_split: train +validation_split: validation +output_type: multiple_choice +test_split: test +doc_to_text: "{{query}}" +doc_to_target: "{{label}}" +process_docs: !function utils.hellaswag_process_doc +doc_to_choice: "choices" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: True + - metric: acc_norm + aggregation: mean + higher_is_better: True + - metric: f1 + aggregation: !function utils.macro_f1_score + average: macro + hf_evaluate: true + higher_is_better: True +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kobest/kobest_sentineg.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kobest/kobest_sentineg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..64319dca39c520c7a8f9c4f20f0ae2a9e44b7230 --- /dev/null +++
b/lm-evaluation/build/lib/lm_eval/tasks/kobest/kobest_sentineg.yaml @@ -0,0 +1,25 @@ +group: + - kobest +task: kobest_sentineg +dataset_path: skt/kobest_v1 +dataset_name: sentineg +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: !function utils.sentineg_doc_to_text +doc_to_target: "{{label}}" +doc_to_choice: ["부정", "긍정"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: True + - metric: f1 + aggregation: !function utils.macro_f1_score + average: macro + hf_evaluate: true + higher_is_better: True +metadata: + version: 1.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kobest/kobest_wic.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kobest/kobest_wic.yaml new file mode 100644 index 0000000000000000000000000000000000000000..569d3393dbe78e1bb5d92e00d4ceac439282b9d0 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kobest/kobest_wic.yaml @@ -0,0 +1,25 @@ +group: + - kobest +task: kobest_wic +dataset_path: skt/kobest_v1 +dataset_name: wic +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: !function utils.wic_doc_to_text +doc_to_target: "{{label}}" +doc_to_choice: ['아니오', '예'] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: True + - metric: f1 + aggregation: !function utils.macro_f1_score + average: macro + hf_evaluate: true + higher_is_better: True +metadata: + version: 1.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kobest/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/kobest/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9799ef038c09a67f92a2b174d57f5aaefa05a32f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kobest/utils.py @@ -0,0 +1,48 @@ +from datasets import Dataset +from sklearn.metrics import f1_score + + +def copa_doc_to_text(doc: dict) -> str: + connector = {"원인": " 왜냐하면", "결과": " 그래서"}[doc["question"].strip()] + return f"""{doc["premise"]} {connector}""" + + +def copa_doc_to_target(doc: dict) -> str: + correct_choice = doc["alternative_1"] if doc["label"] == 0 else doc["alternative_2"] + return f"""{correct_choice}""" + + +def copa_doc_to_choice(doc: dict) -> list: + return [f"""{doc["alternative_1"]}""", f"""{doc["alternative_2"]}"""] + + +def sentineg_doc_to_text(doc: dict): + return f"""문장: {doc["sentence"]} 긍부정:""" + + +def wic_doc_to_text(doc: dict) -> str: + return f"""문장1: {doc["context_1"]} 문장2: {doc["context_2"]} 두 문장에서 {doc["word"]}가 같은 뜻으로 쓰였나?""" + + +def hellaswag_process_doc(doc: Dataset) -> Dataset: + def preprocessor(dataset): + return { + "query": f"""문장: {dataset["context"]}""", + "choices": [ + dataset["ending_1"], + dataset["ending_2"], + dataset["ending_3"], + dataset["ending_4"], + ], + "gold": int(dataset["label"]), + } + + return doc.map(preprocessor) + + +def macro_f1_score(items): + unzipped_list = list(zip(*items)) + golds = unzipped_list[0] + preds = unzipped_list[1] + fscore = f1_score(golds, preds, average="macro") + return fscore diff --git a/lm-evaluation/build/lib/lm_eval/tasks/lambada_multilingual/README.md b/lm-evaluation/build/lib/lm_eval/tasks/lambada_multilingual/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3845c95ca3839bea04e6d49d4a373515012f78da --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/lambada_multilingual/README.md @@ -0,0 +1,49 @@ +# LAMBADA + +### Paper +The LAMBADA dataset: Word 
prediction requiring a broad discourse context +https://arxiv.org/pdf/1606.06031.pdf + +LAMBADA is a dataset to evaluate the capabilities of computational models for text +understanding by means of a word prediction task. LAMBADA is a collection of narrative +passages sharing the characteristic that human subjects are able to guess their last +word if they are exposed to the whole passage, but not if they only see the last +sentence preceding the target word. To succeed on LAMBADA, computational models +cannot simply rely on local context, but must be able to keep track of information +in the broader discourse. + +Homepage: https://zenodo.org/record/2630551#.X4Xzn5NKjUI + +### Citation + +@misc{ + author={Paperno, Denis and Kruszewski, Germán and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fernández, Raquel}, + title={The LAMBADA dataset}, + DOI={10.5281/zenodo.2630551}, + publisher={Zenodo}, + year={2016}, + month={Aug} +} + +### Groups and Tasks + +#### Groups + +* `lambada_multilingual`: Evaluates all `lambada_mt_X` tasks + +#### Tasks + +* `lambada_mt_{en, fr, de, it, es}`: Machine-translated versions of OpenAI's Lambada variant. + +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? +(This task is novel to the Evaluation Harness, and has been checked against v0.3.0 of the harness.) + + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [x] Have you noted which, if any, published evaluation setups are matched by this variant? 
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/lambada_multilingual/lambada_mt_de.yaml b/lm-evaluation/build/lib/lm_eval/tasks/lambada_multilingual/lambada_mt_de.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ef24c84eddbae9e24f6ce0a229b4ede28ee1e83e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/lambada_multilingual/lambada_mt_de.yaml @@ -0,0 +1,3 @@ +include: lambada_mt_en.yaml +task: lambada_openai_mt_de +dataset_name: de diff --git a/lm-evaluation/build/lib/lm_eval/tasks/lambada_multilingual/lambada_mt_en.yaml b/lm-evaluation/build/lib/lm_eval/tasks/lambada_multilingual/lambada_mt_en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7e63a6d1bc025afaf862692b110c027008902274 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/lambada_multilingual/lambada_mt_en.yaml @@ -0,0 +1,20 @@ +group: + - lambada_multilingual +task: lambada_openai_mt_en +dataset_path: EleutherAI/lambada_openai +dataset_name: en +output_type: loglikelihood +test_split: test +doc_to_text: "{{text.split(' ')[:-1]|join(' ')}}" +doc_to_target: "{{' '+text.split(' ')[-1]}}" +should_decontaminate: true +doc_to_decontamination_query: "{{text}}" +metric_list: + - metric: perplexity + aggregation: perplexity + higher_is_better: false + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/lambada_multilingual/lambada_mt_es.yaml b/lm-evaluation/build/lib/lm_eval/tasks/lambada_multilingual/lambada_mt_es.yaml new file mode 100644 index 0000000000000000000000000000000000000000..afe53b00d3eeff6b6b12549d2176fe25ba39db02 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/lambada_multilingual/lambada_mt_es.yaml @@ -0,0 +1,3 @@ +include: lambada_mt_en.yaml +task: lambada_openai_mt_es +dataset_name: es diff --git a/lm-evaluation/build/lib/lm_eval/tasks/lambada_multilingual/lambada_mt_fr.yaml b/lm-evaluation/build/lib/lm_eval/tasks/lambada_multilingual/lambada_mt_fr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dee21c03d61913c64ff56690a531fe918fe08eee --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/lambada_multilingual/lambada_mt_fr.yaml @@ -0,0 +1,3 @@ +include: lambada_mt_en.yaml +task: lambada_openai_mt_fr +dataset_name: fr diff --git a/lm-evaluation/build/lib/lm_eval/tasks/lambada_multilingual/lambada_mt_it.yaml b/lm-evaluation/build/lib/lm_eval/tasks/lambada_multilingual/lambada_mt_it.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2c7a929a4ea9df369f2c82610f6c59d28fdf45bf --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/lambada_multilingual/lambada_mt_it.yaml @@ -0,0 +1,3 @@ +include: lambada_mt_en.yaml +task: lambada_openai_mt_it +dataset_name: it diff --git a/lm-evaluation/build/lib/lm_eval/tasks/prost/README.md b/lm-evaluation/build/lib/lm_eval/tasks/prost/README.md new file mode 100644 index 0000000000000000000000000000000000000000..97752fc0c669b63d9a825110c8da8779f7e3a2e2 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/prost/README.md @@ -0,0 +1,62 @@ +# PROST + +### Paper + +Title: `PROST: Physical Reasoning about Objects Through Space and Time` + +Abstract: https://arxiv.org/abs/2106.03634 + +PROST, Physical Reasoning about Objects Through Space and Time, is a dataset +consisting of 18,736 multiple-choice questions made from 14 manually curated +templates, covering 10 physical reasoning concepts. 
All questions are designed +to probe both causal and masked language models in a zero-shot setting. + +NOTE: PROST is limited to the zero-shot setting to adhere to authors' intentions +as discussed in section 7 of the paper: "We hope that the community will use +this dataset in the intended way: in a zero-shot setting to probe models which +have been trained on data not specifically collected to succeed on PROST." + +Homepage: https://github.com/nala-cub/prost + + +### Citation + +``` +@inproceedings{aroca-ouellette-etal-2021-prost, + title = "{PROST}: {P}hysical Reasoning about Objects through Space and Time", + author = "Aroca-Ouellette, St{\'e}phane and + Paik, Cory and + Roncone, Alessandro and + Kann, Katharina", + booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021", + month = aug, + year = "2021", + address = "Online", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/2021.findings-acl.404", + pages = "4597--4608", +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `prost` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
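+
+To respect the zero-shot setup noted above, the task can be run with an explicit `--num_fewshot 0`; a minimal sketch (the model below is only a placeholder):
+
+```sh
+lm_eval --model hf \
+    --model_args pretrained=EleutherAI/pythia-160m \
+    --tasks prost \
+    --num_fewshot 0
+```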
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/prost/corypaik_prost.yaml b/lm-evaluation/build/lib/lm_eval/tasks/prost/corypaik_prost.yaml new file mode 100644 index 0000000000000000000000000000000000000000..adf7a8d232d661627f2be03fc2fbf0d38ee07504 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/prost/corypaik_prost.yaml @@ -0,0 +1,19 @@ +task: prost +dataset_path: corypaik/prost +dataset_name: null +output_type: multiple_choice +test_split: test +doc_to_text: "{{context}}\nQuestion: {{ex_question}}\nAnswer:" +doc_to_target: label +doc_to_choice: "{{[A, B, C, D]}}" +should_decontaminate: true +doc_to_decontamination_query: "{{context}}\nQuestion: {{ex_question}}\nAnswer:" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/utils.py b/lm-evaluation/build/lib/lm_eval/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..30de3e2506fb707fb2e9b0cd839a20b0ff78b783 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/utils.py @@ -0,0 +1,380 @@ +import collections +import fnmatch +import functools +import importlib.util +import inspect +import logging +import os +import re +from itertools import islice +from typing import Any, Callable, List + +import numpy as np +import yaml +from jinja2 import BaseLoader, Environment, StrictUndefined + + +logging.basicConfig( + format="%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s", + datefmt="%Y-%m-%d:%H:%M:%S", + level=logging.INFO, +) +eval_logger = logging.getLogger("lm-eval") + +SPACING = " " * 47 + + +def escaped_split(text, sep_char, maxsplit=-1): + """Split text into a list on occurrences of the given separation + character `sep_char`. The separation character may be escaped by a + backslash to avoid splitting at that location. + + The separation character must be a string of size 1. + + If `maxsplit` is given, at most `maxsplit` splits are done (thus, + the list will have at most `maxsplit + 1` elements). If `maxsplit` + is not specified or less than 0, then there is no limit on the + number of splits (all possible splits are made). + """ + assert ( + len(sep_char) == 1 + ), "separation string must be a single character for escaped splitting" + + if maxsplit == 0: + return text + maxsplit = max(0, maxsplit) + + return re.split(r"(? 
so the first token has something to condition on + :return: generator + Generator of tuples + (input_tokens, pred_tokens) + Note: Score only the last len(pred_tokens) logits of the LM + """ + assert 1 <= context_len <= max_seq_len + if not token_list: + return + # +1 offset, going from input->preds + pred_len = max_seq_len - context_len + 1 + predicted = 0 + + # Special handling for first window: predict all tokens + first_seq_len = min(max_seq_len, len(token_list)) + yield ([prefix_token] + token_list[: first_seq_len - 1], token_list[:first_seq_len]) + predicted += first_seq_len + + while predicted < len(token_list): + window_pred_len = min(len(token_list) - predicted, pred_len) + window_end = predicted + window_pred_len + + yield ( + token_list[window_end - max_seq_len - 1 : window_end - 1], + token_list[window_end - window_pred_len : window_end], + ) + predicted += window_pred_len + + +def make_disjoint_window(pair): + """Takes output from get_rolling_token_windows and makes the context not overlap with the continuation""" + a, b = pair + return a[: len(a) - (len(b) - 1)], b + + +class Reorderer: + def __init__(self, arr: List[Any], fn: Callable) -> None: + """Reorder an array according to some function + + Args: + arr (List[Any]): The initial array + fn (Callable[[Any], Any]): A function to determine the priority of elements + """ + self.size = len(arr) + arr = list(enumerate(arr)) + arr = group(arr, lambda x: fn(x[1])) + # arr = [([y[0] for y in x], x[0][1]) for x in arr] + # TODO: overhaul reorderer. It currently grouped requests by content but we don't want this + arr = [([y[0]], x[0][1]) for x in arr for y in x] + arr.sort(key=lambda x: fn(x[1])) + + self.arr = arr + + def get_reordered(self): + """Gets the reordered array + + Returns: + List[Any]: The reordered array + """ + return [x[1] for x in self.arr] + + def get_original(self, newarr): + """Restores the original order of a new array based on the old array's order + + Args: + newarr (List[Any]): The array to be restored + + Returns: + List[Any]: The array restored to the original order + """ + res = [None] * self.size + cov = [False] * self.size + + for (inds, _), v in zip(self.arr, newarr): + for ind in inds: + res[ind] = v + cov[ind] = True + + assert all(cov) + + return res + + +def make_table(result_dict, column: str = "results"): + """Generate table of results.""" + from pytablewriter import LatexTableWriter, MarkdownTableWriter + + if column == "results": + column_name = "Tasks" + elif column == "groups": + column_name = "Groups" + + all_headers = [ + column_name, + "Version", + "Filter", + "n-shot", + "Metric", + "Value", + "", + "Stderr", + ] + + md_writer = MarkdownTableWriter() + latex_writer = LatexTableWriter() + md_writer.headers = all_headers + latex_writer.headers = all_headers + + values = [] + + for k, dic in result_dict[column].items(): + version = result_dict["versions"].get(k, "N/A") + n = str(result_dict["n-shot"][k]) + + if "alias" in dic: + k = dic.pop("alias") + + for (mf), v in dic.items(): + m, _, f = mf.partition(",") + if m.endswith("_stderr"): + continue + + if m + "_stderr" + "," + f in dic: + se = dic[m + "_stderr" + "," + f] + if se != "N/A": + se = "%.4f" % se + values.append([k, version, f, n, m, "%.4f" % v, "±", se]) + else: + values.append([k, version, f, n, m, "%.4f" % v, "", ""]) + k = "" + version = "" + md_writer.value_matrix = values + latex_writer.value_matrix = values + + # todo: make latex table look good + # print(latex_writer.dumps()) + + return md_writer.dumps() + + +def 
positional_deprecated(fn): + """ + A decorator to nudge users into passing only keyword args (`kwargs`) to the + wrapped function, `fn`. + """ + + @functools.wraps(fn) + def _wrapper(*args, **kwargs): + if len(args) != 1 if inspect.ismethod(fn) else 0: + print( + f"WARNING: using {fn.__name__} with positional arguments is " + "deprecated and will be disallowed in a future version of " + "lm-evaluation-harness!" + ) + return fn(*args, **kwargs) + + return _wrapper + + +def ignore_constructor(loader, node): + return node + + +def import_function(loader, node): + function_name = loader.construct_scalar(node) + yaml_path = os.path.dirname(loader.name) + + *module_name, function_name = function_name.split(".") + if isinstance(module_name, list): + module_name = ".".join(module_name) + module_path = os.path.normpath(os.path.join(yaml_path, "{}.py".format(module_name))) + + spec = importlib.util.spec_from_file_location(module_name, module_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + + function = getattr(module, function_name) + return function + + +def load_yaml_config(yaml_path=None, yaml_config=None, yaml_dir=None, mode="full"): + if mode == "simple": + constructor_fn = ignore_constructor + elif mode == "full": + constructor_fn = import_function + + # Add the import_function constructor to the YAML loader + yaml.add_constructor("!function", constructor_fn) + if yaml_config is None: + with open(yaml_path, "rb") as file: + yaml_config = yaml.full_load(file) + + if yaml_dir is None: + yaml_dir = os.path.dirname(yaml_path) + + assert yaml_dir is not None + + if "include" in yaml_config: + include_path = yaml_config["include"] + del yaml_config["include"] + + if isinstance(include_path, str): + include_path = [include_path] + + # Load from the last one first + include_path.reverse() + final_yaml_config = {} + for path in include_path: + # Assumes that path is a full path. + # If not found, assume the included yaml + # is in the same dir as the original yaml + if not os.path.isfile(path): + path = os.path.join(yaml_dir, path) + + try: + included_yaml_config = load_yaml_config(yaml_path=path, mode=mode) + final_yaml_config.update(included_yaml_config) + except Exception as ex: + # If failed to load, ignore + raise ex + + final_yaml_config.update(yaml_config) + return final_yaml_config + return yaml_config + + +def regex_replace(string, pattern, repl, count: int = 0): + """Implements the `re.sub` function as a custom Jinja filter.""" + return re.sub(pattern, repl, string, count=count) + + +env = Environment(loader=BaseLoader, undefined=StrictUndefined) +env.filters["regex_replace"] = regex_replace + + +def apply_template(template: str, doc: dict) -> str: + rtemplate = env.from_string(template) + return rtemplate.render(**doc) + + +def create_iterator(raw_iterator, *, rank=0, world_size=1, limit=None): + """ + Method for creating a (potentially) sliced and limited + iterator from a raw document iterator. 
Used for splitting data + among ranks in multigpu setting or only pulling a sample of documents + """ + return islice(raw_iterator, rank, limit, world_size) diff --git a/lm-evaluation/docs/new_task_guide.md b/lm-evaluation/docs/new_task_guide.md new file mode 100644 index 0000000000000000000000000000000000000000..0df7bb3b921ab4ee9b5c5cf82d92473cf9920508 --- /dev/null +++ b/lm-evaluation/docs/new_task_guide.md @@ -0,0 +1,445 @@ +# New Task Guide + +`lm-evaluation-harness` is a framework that strives to support a wide range of zero- and few-shot evaluation tasks on autoregressive language models (LMs). + +This documentation page provides a walkthrough to get started creating your own task, in `lm-eval` versions v0.4.0 and later. + +A more interactive tutorial is available as a Jupyter notebook [here](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/examples/lm-eval-overview.ipynb). + +## Setup + +If you haven't already, go ahead and fork the main repo, clone it, create a branch with the name of your task, and install the project requirements in your environment: + +```sh +# After forking... +git clone https://github.com//lm-evaluation-harness.git +cd lm-evaluation-harness +git checkout -b +pip install -e ".[dev]" +``` + +In this document, we'll walk through the basics of implementing a static benchmark evaluation in two formats: a *generative* task which requires sampling text from a model, such as [`gsm8k`](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/gsm8k/gsm8k.yaml), and a *discriminative*, or *multiple choice*, task where the model picks the most likely of several fixed answer choices, such as [`sciq`](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/sciq/sciq.yaml). + +## Creating a YAML file + +To implement a new standard task, we'll need to write a YAML file which configures our task logic. We start by making a new empty YAML file. This file can have any name, but we recommend placing it in a subfolder of `lm_eval/tasks` titled by the dataset or task's shorthand name: for example, + +```sh +touch lm_eval/tasks//.yaml +``` +Or, copy the template subfolder we provide from `templates/new_yaml_task`: +```sh +cp -r templates/new_yaml_task lm_eval/tasks/ +``` +and rename the folders and YAML file(s) as desired. + +### Selecting and configuring a dataset + +All data downloading and management is handled through the HuggingFace (**HF**) [`datasets`](https://github.com/huggingface/datasets) API. So, the first thing you should do is check to see if your task's dataset is already provided in their catalog [here](https://huggingface.co/datasets). If it's not in there, please consider adding it to their Hub to make it accessible to a wider user base by following their [new dataset guide](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md) +. + +Once you have a HuggingFace dataset prepared for your task, we want to assign our new YAML to use this dataset: + +```yaml +dataset_path: ... # the name of the dataset on the HF Hub. +dataset_name: ... # the dataset configuration to use. Leave `null` if your dataset does not require a config to be passed. See https://huggingface.co/docs/datasets/load_hub#configurations for more info. +dataset_kwargs: null # any extra keyword arguments that should be passed to the dataset constructor, e.g. `data_dir`. 
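+# For example, for the SuperGLUE BoolQ config hosted on the HF Hub, these fields
+# might look like the following (a sketch; substitute your own dataset):
+# dataset_path: super_glue
+# dataset_name: boolq
+# dataset_kwargs: null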
+``` + +Next, we'd like to tell our task what the dataset's train, validation, and test splits are named, if they exist: + +```yaml +training_split: +validation_split: +test_split: +``` +Tests will run on the `test_split` if it is available, and otherwise evaluate on the `validation_split`. + +We can also specify from which split the task should retrieve few-shot examples via: +```yaml +fewshot_split: +``` +though if this is not set, we will default to train/validation/test sets, in that order. + + +Finally, our dataset may not be already in the exact format we want. Maybe we have to strip whitespace and special characters via a regex from our dataset's "question" field! Or maybe we just want to rename its columns to match a convention we'll be using for our prompts. + +Let's create a python file in the directory where we're writing our YAML file: +```bash +touch lm_eval/tasks//utils.py +``` +Now, in `utils.py` we'll write a function to process each split of our dataset: + +TODO: Change the example to one that's in the tasks/ + +```python +def process_docs(dataset: datasets.Dataset): + def _helper(doc): + # modifies the contents of a single + # document in our dataset. + doc["choices"] = [doc["choice1"], doc["choice2"], doc["wrong_answer"]] + doc["gold"] = doc["label"] + return doc + + return dataset.map(_helper) # returns back a datasets.Dataset object +``` + +Now, in our YAML config file we'll use the `!function` constructor, and tell the config where our imported Python function will come from. At runtime, before doing anything else we will preprocess our dataset according to this function! +```yaml +process_docs: !function utils.process_docs +``` + +### Using Local Datasets + +To load a local dataset for evaluation, you can specify data files in the `dataset_kwargs` field, such as the following for JSON files: + +``` +dataset_path: json +dataset_name: null +dataset_kwargs: + data_files: /path/to/my/json +``` +Or with files already split into separate directories: + +``` +dataset_path: arrow +dataset_kwargs: + data_files: + train: /path/to/arrow/train/data-00000-of-00001.arrow + validation: /path/to/arrow/validation/data-00000-of-00001.arrow +``` + +Alternatively, if you have previously downloaded a dataset from huggingface hub (using `save_to_disk()`) and wish to use the local files, you will need to use `data_dir` under `dataset_kwargs` to point to where the directory is. + +``` +dataset_path: hellaswag +dataset_kwargs: + data_dir: hellaswag_local/ +``` + +You can also set `dataset_path` as a directory path in your local system. This will assume that there is a loading script with the same name as the directory. [See datasets docs](https://huggingface.co/docs/datasets/loading#local-loading-script). + +## Writing a Prompt Template + +The next thing we need to do is decide what format to use when presenting the data to the LM. This is our **prompt**, where we'll define both an input and output format. + +To write a prompt, users will use `doc_to_text`, `doc_to_target`, and `doc_to_choice` (Optional when certain conditions are met). + +`doc_to_text` defines the input string a model will be given while `doc_to_target` and `doc_to_choice` will be used to generate the target text. `doc_to_target` can be either a text string that refers to the target string or an integer that refers to the index of the correct label. When it is set as an index, `doc_to_choice` must be also be set with the appropriate list of possible choice strings. 
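+
+Putting these together, a minimal multiple-choice prompt configuration might look like the sketch below (the feature names `question`, `choices`, and `label` are hypothetical and depend on your dataset):
+
+```yaml
+doc_to_text: "Question: {{question}}\nAnswer:"  # input string shown to the model
+doc_to_choice: choices                          # a list-valued feature holding the answer options
+doc_to_target: label                            # an integer feature giving the index of the correct option
+```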
+ +### Basic prompts + +If a dataset is straightforward enough, users can enter the feature name directly. This assumes that no preprocessing is required. For example, in [Swag](https://github.com/EleutherAI/lm-evaluation-harness/blob/1710b42d52d0f327cb0eb3cb1bfbbeca992836ca/lm_eval/tasks/swag/swag.yaml#L10-L11), `doc_to_text` and `doc_to_target` are each given the name of a dataset feature. +```yaml +doc_to_text: startphrase +doc_to_target: label +``` +Hard-coding is also possible as is the case in [SciQ](https://github.com/EleutherAI/lm-evaluation-harness/blob/1710b42d52d0f327cb0eb3cb1bfbbeca992836ca/lm_eval/tasks/sciq/sciq.yaml#L11). +```yaml +doc_to_target: 3 +``` +`doc_to_choice` can be given a list of strings directly as the options (See [Toxigen](https://github.com/EleutherAI/lm-evaluation-harness/blob/1710b42d52d0f327cb0eb3cb1bfbbeca992836ca/lm_eval/tasks/toxigen/toxigen.yaml#L11)) +```yaml +doc_to_choice: ['No', 'Yes'] +``` + +If a dataset feature is already a list, you can set the name of the feature as `doc_to_choice` (See [Hellaswag](https://github.com/EleutherAI/lm-evaluation-harness/blob/e0eda4d3ffa10e5f65e0976161cd134bec61983a/lm_eval/tasks/hellaswag/hellaswag.yaml#L13)) +``` +doc_to_choice: choices +``` + + + +### Writing a prompt with Jinja 2 + +We support the [Jinja 2](https://jinja.palletsprojects.com/en/3.1.x/) templating language for writing prompts. In practice, this means you can take your dataset's columns and do many basic string manipulations to place each document into prompted format. + +Take for example the dataset `super_glue/boolq`. As input, we'd like to use the features `passage` and `question` and string them together so that for a sample line `doc`, the model sees something in the format of: +``` +doc["passage"] +Question: doc["question"]? +Answer: +``` +We do this by [writing](https://github.com/EleutherAI/lm-evaluation-harness/blob/1710b42d52d0f327cb0eb3cb1bfbbeca992836ca/lm_eval/tasks/super_glue/boolq/default.yaml#L9C1-L9C61) +```yaml +doc_to_text: "{{passage}}\nQuestion: {{question}}?\nAnswer:" +``` +Such that `{{passage}}` will be replaced by `doc["passage"]` and `{{question}}` with `doc["question"]` when rendering the prompt template. + +Our intended output is for the model to predict a single whitespace, and then the answer to the question. We do this via: +```yaml +doc_to_target: "{{answer}}" +``` + + +**Important**: we now add `target_delimiter` between input and target, which defaults to " ", such that the full input-output string is `doc_to_text(doc) + target_delimiter + doc_to_target(doc)`. `doc_to_text` should therefore not have trailing whitespace, and `doc_to_target` should not have leading whitespace. + + +#### Multiple choice format + +For tasks which are multiple choice (a fixed, finite set of label words per document) and evaluated via comparing loglikelihoods of all label words (the `multiple_choice` task output type), we enforce a particular convention on prompt format. + +An annotated example in the case of SciQ is as follows: + +```yaml +doc_to_text: "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:" # This is the input portion of the prompt for this doc. It will have " {{choice}}" appended to it as target for each choice in answer_choices. +doc_to_target: 3 # this contains the index into the answer choice list of the correct answer. +doc_to_choice: "{{[distractor1, distractor2, distractor3, correct_answer]}}" +``` +Task implementers are thus able to decide what the answer choices should be for a document, and what prompt format to use.
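+
+Concretely (a schematic, not actual harness output), for one SciQ document the harness scores four continuations, one per answer choice, each appended to the rendered input with the default single-space `target_delimiter`, and accuracy counts whether the highest-loglikelihood continuation is the correct one:
+
+```
+{{support}}\nQuestion: {{question}}\nAnswer: {{distractor1}}
+{{support}}\nQuestion: {{question}}\nAnswer: {{distractor2}}
+{{support}}\nQuestion: {{question}}\nAnswer: {{distractor3}}
+{{support}}\nQuestion: {{question}}\nAnswer: {{correct_answer}}
+```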
+ +The label index can also be sourced from a feature directly. For example, in `super_glue/boolq`, the label index is defined in the feature `label`. We can set `doc_to_target` as simply `label`. The options or verbalizers can be written in the form of a list `["no", "yes"]` that will correspond to the label index. + +```yaml +doc_to_text: "{{passage}}\nQuestion: {{question}}?\nAnswer:" +doc_to_target: label +doc_to_choice: ["no", "yes"] +``` + +### Using Python Functions for Prompts + +There may be cases where the prompt we want to implement is easier to express in Python instead of Jinja 2. For this, we can use Python helper functions that are defined in the YAML config. It should be noted that the function script must be in the same directory as the yaml. + +A good example is WikiText, which requires a lot of regex rules to clean the samples. +``` +def wikitext_detokenizer(doc): + string = doc["page"] + # contractions + string = string.replace("s '", "s'") + string = re.sub(r"/' [0-9]/", r"/'[0-9]/", string) + ... + string = string.replace(" 's", "'s") + + return string +``` + +We can load this function in `doc_to_target` by using a `!function` operator after `doc_to_target` and followed by `.`. In the file [wikitext.yaml](https://github.com/EleutherAI/lm-evaluation-harness/blob/6ae376e3a43caa58b95bb8aa73054a94827bf560/lm_eval/tasks/wikitext/wikitext.yaml) we write: +``` +doc_to_target: !function preprocess_wikitext.wikitext_detokenizer +``` + +### Importing a Prompt from Promptsource + +[Promptsource](https://github.com/bigscience-workshop/promptsource/tree/main/promptsource) is a great repository for crowdsourced prompts for many datasets. We can load these prompts easily by using the `use_prompt` argument and filling it with the format `"promptsource:"`. To use this, `doc_to_text` and `doc_to_target` should be left undefined. This will fetch the template of the dataset defined in the YAML file. + +For example, for Super Glue BoolQ, if we want to use the prompt template `GPT-3 Style`, we can add this to the YAML file. +``` +use_prompt: "promptsource:GPT-3 Style" +``` + +If you would like to run evaluation on all prompt templates, you can simply call it this way. +``` +use_prompt: "promptsource:*" +``` + +### Setting metrics + +You're almost done! Now we need to choose how to score our task. +- *If this is a multiple choice task:* do you just want to check your model's accuracy in choosing the correct answer choice? +- *If this is a generation task:* do you just want to check how often your model outputs *exactly the ground-truth output string provided*? + + +If the answer to the above is no: you'll need to record what scoring metrics to use! Metrics can be listed in the following format: + +```yaml +metric_list: + - metric: + aggregation: + higher_is_better: + - metric: !function script.function + aggregation: ... + higher_is_better: ... +``` +`aggregation` and `higher_is_better` can optionally be left out to default to the manually-set defaults if using a natively supported metric; otherwise they must be defined explicitly (for example, when using a custom metric implemented as a function). + +For a full list of natively supported metrics and aggregation functions see [`docs/task_guide.md`](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/task_guide.md).
All metrics supported in [HuggingFace Evaluate](https://github.com/huggingface/evaluate/tree/main/metrics) can also be used, and will be loaded if a given metric name is not one natively supported in `lm-eval` or `hf_evaluate` is set to `true`. + +### Optional, More Advanced Setup + +Some tasks may require more advanced processing logic than is described in this guide. + +As a heuristic check: +* Does your task require generating multiple free-form outputs per input document? +* Does your task require complex, multi-step post-processing of generated model outputs? +* Does your task require subsetting documents on the fly based on their content? +* Do you expect to compute metrics after applying multiple such processing steps on your model outputs? +* Does your task rely on metrics that need a custom implementation? + +For more detail on the task system and advanced features, see [`docs/task_guide.md`](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/task_guide.md). If none of the above sound like they apply to your task, it's time to continue on to checking your task performance! + +### Task name + groups (registering a task) + +To test a task conveniently, it helps to *register* the task--that is, to give it a name and make the `lm-eval` library aware it exists! + +If you're writing your YAML file inside the `lm_eval/tasks` folder, you just need to give your task a name! You can do this inside your YAML file: + +```yaml +task: +``` +Including a task name is mandatory. + +It is often also convenient to label your task with several `groups`, or tags, though this field is optional: + +```yaml +group: + - group1 + - group2 +``` +This will add your task to the `group1` and `group2` groups, enabling people to know how to categorize your task, and if desired run all tasks in one of these groups at once, your task along with them. + + +If your task is not in the `lm_eval/tasks` folder, you'll need to tell the Eval Harness where to look for YAML files. + +You can do this via the `--include_path` argument in `__main__.py`. This command will be used to initialize the `TaskManager` object which you can also use for your custom scripts. + +```python +task_manager = TaskManager(args.verbosity, include_path=args.include_path) +``` + +Passing `--tasks /path/to/yaml/file` is also accepted. + + +### Advanced Group Configs + +You can make a more complete group config while also tailoring parameters for individual tasks. + +For example, let's build a config for evaluating MMLU and a few natural language inference tasks. For MMLU, we can write the name of the benchmark as a subtask under `task`. You can configure parameters such as `num_fewshot`. If the task being configured is a group such as `mmlu` or `super_glue`, the parameter set will be applied to all of the subtasks. + +```yaml +group: nli_and_mmlu +task: + - group: nli_tasks + task: + - cb + - anli_r1 + - rte + - task: mmlu + num_fewshot: 2 +``` +It's also worth noting that you can insert a group config as a task: here, to make a group of natural language inference tasks, you write it as you would normally write a group config, but place it as part of the task list under the main group being built. + +### Duplicate Tasks in Group Configs + +There might be cases where you want to evaluate several prompt variations of a task and compare how models perform across them. You can list an existing task (in the example below, `anli_r1`) multiple times with varying `doc_to_text` implementations.
To differentiate each variation, we can use `task_alias`; LM-Eval will recognize that there are multiple variations of the same task and keep them distinct. +```yaml +group: flan_held_in +group_alias: Flan (Held-In) +task: + # ANLI R1 + - group: anli_r1_flan + group_alias: ANLI R1 + task: + - task: anli_r1 + task_alias: prompt-0 + include: _held_in_template_yaml + doc_to_text: "{{premise}}\n\nChoose your answer ..." + ... + - task: anli_r1 + task_alias: prompt-1 + include: _held_in_template_yaml + doc_to_text: "{{premise}}\n\nBased on ..." + ... +``` + +### Configuring python classes + +There can be occasions when YAML-based tasks cannot accommodate how a task needs to be handled. LM-Eval also supports manually implementing tasks as Python classes, as was done before `0.4.x`. To register such a task, you can simply make a yaml with the name of the task in `task` and the class object in `class` using the `!function` prefix. + +```yaml +task: squadv2 +class: !function task.SQuAD2 +``` + +This also applies to building group configurations with subtasks that are python classes. + +```yaml +group: scrolls +task: + - task: scrolls_qasper + class: !function task.Qasper + - task: scrolls_quality + class: !function task.QuALITY + - task: scrolls_narrativeqa + class: !function task.NarrativeQA + ... +``` + +## Beautifying Table Display + +To avoid conflicts, each task needs to be registered with a unique name. Because of this, slight variations of a task are still counted as distinct tasks and need distinct names. This can be done by appending a qualifier that refers to the variation, as in MMLU, where the templates used for the Flan evaluation are differentiated from the default by the `mmlu_flan_*` prefix. Printing the full task names can easily clutter the results table at the end of the evaluation, especially when you have a long list of tasks or are using a benchmark that comprises many tasks. To make the table more legible, you can use `task_alias` and `group_alias` to provide an alternative task name and group name that will be printed. +For example, in `mmlu_abstract_algebra.yaml` we set `group_alias` to `stem` and `task_alias` to `abstract_algebra`. + +``` +"dataset_name": "abstract_algebra" +"description": "The following are multiple choice questions (with answers) about abstract\ + \ algebra.\n\n" +"group": "mmlu_stem" +"group_alias": "stem" +"include": "_default_template_yaml" +"task": "mmlu_abstract_algebra" +"task_alias": "abstract_algebra" +``` +Note: Even though `group` can be a list, for now, `group_alias` can only be a single string. + +## Checking validity + +After registering your task, you can now check that the data downloads correctly and that the few-shot samples look as intended. Run the following command with your desired args: + +```bash +python -m scripts.write_out \ + --output_base_path \ + --tasks \ + --sets \ + --num_fewshot K \ + --num_examples N \ +``` + +Open the file specified at the `--output_base_path ` and ensure it passes +a simple eye test. + +## Versioning + +One key feature in LM Evaluation Harness is the ability to version tasks--that is, mark them with a specific version number that can be bumped whenever a breaking change is made. + +This version info can be provided by adding the following to your new task config file: + +``` +metadata: + version: 0 +``` + +Now, whenever a change needs to be made to your task in the future, please increase the version number by 1 so that users can differentiate the different task iterations and versions.
+
+## Beautifying Table Display
+
+To avoid conflicts, each task needs to be registered with a unique name. Because of this, slight variations of a task still count as distinct tasks and need to be named uniquely. This is typically done by adding a qualifier that refers to the variation, as in MMLU, where the Flan-template variants are differentiated from the defaults by the prefix `mmlu_flan_*`. Printing the full task names can easily clutter the results table at the end of an evaluation, especially when you have a long list of tasks or use a benchmark that comprises many tasks. To make the table more legible, you can use `task_alias` and `group_alias` to provide alternative task and group names to be printed.
+
+For example, in `mmlu_abstract_algebra.yaml` we set `group_alias` to `stem` and `task_alias` to `abstract_algebra`:
+
+```
+"dataset_name": "abstract_algebra"
+"description": "The following are multiple choice questions (with answers) about abstract\
+  \ algebra.\n\n"
+"group": "mmlu_stem"
+"group_alias": "stem"
+"include": "_default_template_yaml"
+"task": "mmlu_abstract_algebra"
+"task_alias": "abstract_algebra"
+```
+Note: Even though `group` can be a list, for now, `group_alias` can only be a single string.
+
+## Checking validity
+
+After registering your task, you can check that your data downloads correctly and that the few-shot samples look as intended. Run the following command with your desired args:
+
+```bash
+python -m scripts.write_out \
+    --output_base_path <path> \
+    --tasks <your-task-name> \
+    --sets <train | val | test> \
+    --num_fewshot K \
+    --num_examples N \
+```
+
+Open the file specified at the `--output_base_path <path>` and ensure it passes
+a simple eye test.
+
+## Versioning
+
+One key feature in LM Evaluation Harness is the ability to version tasks--that is, mark them with a specific version number that can be bumped whenever a breaking change is made.
+
+This version info can be provided by adding the following to your new task config file:
+
+```
+metadata:
+  version: 0
+```
+
+Now, whenever a change needs to be made to your task in the future, please increment the version number by 1 so that users can distinguish between the different task iterations and versions.
+
+If you are incrementing a task's version, please also consider adding a changelog to the task's README.md noting the date, PR number, what version you have updated to, and a one-liner describing the change.
+
+For example:
+
+* \[Dec 25, 2023\] (PR #999) Version 0.0 -> 1.0: Fixed a bug with answer extraction that led to underestimated performance.
+
+## Checking performance + equivalence
+
+It's now time to check models' performance on your task! In the evaluation harness, we intend to support a wide range of evaluation tasks and setups, but prioritize the inclusion of already-proven benchmarks following the precise evaluation setups in the literature where possible.
+
+To enable this, we provide a checklist that should be completed when contributing a new task, to enable accurate book-keeping and to ensure that tasks added to the library are well-tested and, where applicable, precedented.
+
+### Task Validity Checklist
+
+The checklist is the following:
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+  * [ ] Have you referenced the original paper that introduced the task?
+  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
+
+If you have created a new subfolder in `lm_eval/tasks`, it is recommended to include a filled-out copy of this checklist in that subfolder's README.md. A small sketch of how a reference-implementation check might be documented is included at the end of this guide.
+
+## Submitting your task
+
+You're all set! Now push your work and make a pull request to the `main` branch! Thanks for the contribution :). If there are any questions, please leave a message in the `#lm-thunderdome` channel on the EAI discord!
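+
+As a postscript on the reference-implementation checklist item above: one lightweight way to document "how to run such a test" is to include a small comparison script in your task's README. The sketch below is purely illustrative: the results path, task name, metric key, reference number, and tolerance are all placeholders to adapt to your task.
+
+```python
+# Sketch: compare the harness's reported metric for your task against a
+# published reference value. All values below are placeholders.
+import json
+
+with open("results.json") as f:  # file produced via --output_path
+    results = json.load(f)["results"]
+
+harness_acc = results["my_new_task"]["acc,none"]  # metric key varies by task/config
+reference_acc = 0.651  # placeholder: number reported by the original paper
+
+tolerance = 0.01
+assert abs(harness_acc - reference_acc) < tolerance, (
+    f"Harness accuracy {harness_acc:.3f} differs from reference {reference_acc:.3f}"
+)
+print(f"OK: {harness_acc:.3f} is within {tolerance} of the reference {reference_acc:.3f}")
+```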
diff --git a/lm-evaluation/tests/testdata/ethics_utilitarianism-v0-res.json b/lm-evaluation/tests/testdata/ethics_utilitarianism-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..857af346b47d7ce11ee4192b928608a2111776f4 --- /dev/null +++ b/lm-evaluation/tests/testdata/ethics_utilitarianism-v0-res.json @@ -0,0 +1 @@ +{"results": {"ethics_utilitarianism": {"acc": 0.49771214642262895, "acc_stderr": 0.007211546310787838}}, "versions": {"ethics_utilitarianism": 0}} \ No newline at end of file diff --git a/lm-evaluation/tests/testdata/gguf_test_52ea409606de8755e03cf7c79f824101a4ce64bb6e6d3df556b8a4e7a5d92418.pkl b/lm-evaluation/tests/testdata/gguf_test_52ea409606de8755e03cf7c79f824101a4ce64bb6e6d3df556b8a4e7a5d92418.pkl new file mode 100644 index 0000000000000000000000000000000000000000..f468bb46d3da891a285c615b25de9b2d99a7fd8d --- /dev/null +++ b/lm-evaluation/tests/testdata/gguf_test_52ea409606de8755e03cf7c79f824101a4ce64bb6e6d3df556b8a4e7a5d92418.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4f122bfaa24901cff1ee686da0cf49ade7b6877c31a3daeb32c8cf2e328a77e +size 153 diff --git a/lm-evaluation/tests/testdata/hendrycksTest-anatomy-v0-loglikelihood b/lm-evaluation/tests/testdata/hendrycksTest-anatomy-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..a7ae5fa705e58cf0e7c06ca0fe84a186d24b506f --- /dev/null +++ b/lm-evaluation/tests/testdata/hendrycksTest-anatomy-v0-loglikelihood @@ -0,0 +1 @@ +bf05e04ed8cf61cf3aad294ed3f5a16137775ffdd20f1b129022ddffc1251768 \ No newline at end of file diff --git a/lm-evaluation/tests/testdata/piqa-v0-loglikelihood b/lm-evaluation/tests/testdata/piqa-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..b01b1fe5d8c699f855bff57061d6d63715c7f058 --- /dev/null +++ b/lm-evaluation/tests/testdata/piqa-v0-loglikelihood @@ -0,0 +1 @@ +6048a3a2bb3ad1e6a3d98139618e06b4d7de766edd685bd38837596199c3f69f \ No newline at end of file diff --git a/lm-evaluation/tests/testdata/textsynth_test_9d5f33dbfe1e254928c89f5ed85e4c010d888065f55a8f1b863bc1eb0340a5f2.pkl b/lm-evaluation/tests/testdata/textsynth_test_9d5f33dbfe1e254928c89f5ed85e4c010d888065f55a8f1b863bc1eb0340a5f2.pkl new file mode 100644 index 0000000000000000000000000000000000000000..46e0dbbbda5bd6902bd4cd205d59976e71a3b0fa --- /dev/null +++ b/lm-evaluation/tests/testdata/textsynth_test_9d5f33dbfe1e254928c89f5ed85e4c010d888065f55a8f1b863bc1eb0340a5f2.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a06027365696479403ecb4abb20da8ea6befb2aba6d0098f1dae42df661b542d +size 1813