import argparse
import json
import logging
import os
import re
import sys
from functools import partial
from pathlib import Path
from typing import Union

import numpy as np

from lm_eval import evaluator, utils
from lm_eval.evaluator import request_caching_arg_to_dict
from lm_eval.logging_utils import WandbLogger
from lm_eval.tasks import TaskManager
from lm_eval.utils import make_table, simple_parse_args_string

DEFAULT_RESULTS_FILE = "results.json"


def _handle_non_serializable(o):
    # json.dumps fallback for numpy integers and sets; anything else is stringified.
    if isinstance(o, np.int64) or isinstance(o, np.int32):
        return int(o)
    elif isinstance(o, set):
        return list(o)
    else:
        return str(o)


def _int_or_none_list_arg_type(max_len: int, value: str, split_char: str = ","):
    def parse_value(item):
        item = item.strip().lower()
        if item == "none":
            return None
        try:
            return int(item)
        except ValueError:
            raise argparse.ArgumentTypeError(f"{item} is not an integer or None")

    items = [parse_value(v) for v in value.split(split_char)]
    num_items = len(items)

    if num_items == 1:
        # Makes downstream handling the same for single and multiple values
        items = items * max_len
    elif num_items != max_len:
        raise argparse.ArgumentTypeError(
            f"Argument requires {max_len} integers or None, separated by '{split_char}'"
        )

    return items


def check_argument_types(parser: argparse.ArgumentParser):
    """
    Check to make sure all CLI args are typed, raises error if not
    """
    for action in parser._actions:
        if action.dest != "help" and not action.const:
            if action.type is None:
                raise ValueError(
                    f"Argument '{action.dest}' doesn't have a type specified."
                )
            else:
                continue
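# Quick reference for `_int_or_none_list_arg_type` when partially applied as an argparse
# `type=` callable (see the --seed flag in setup_parser below). The expected behaviour is
# derived directly from the helper above:
#
#   _int_or_none_list_arg_type(3, "1234")       -> [1234, 1234, 1234]   (single value broadcast)
#   _int_or_none_list_arg_type(3, "0,None,42")  -> [0, None, 42]
#   _int_or_none_list_arg_type(3, "0,1")        -> raises argparse.ArgumentTypeError (wrong count)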
def setup_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`"
    )
    parser.add_argument(
        "--tasks",
        "-t",
        default=None,
        type=str,
        metavar="task1,task2",
        help="To get full list of tasks, use the command lm-eval --tasks list",
    )
    parser.add_argument(
        "--model_args",
        "-a",
        default="",
        type=str,
        help="Comma separated string arguments for model, e.g. `pretrained=EleutherAI/pythia-160m,dtype=float32`",
    )
    parser.add_argument(
        "--num_fewshot",
        "-f",
        type=int,
        default=None,
        metavar="N",
        help="Number of examples in few-shot context",
    )
    parser.add_argument(
        "--batch_size",
        "-b",
        type=str,
        default=1,
        metavar="auto|auto:N|N",
        help="Acceptable values are 'auto', 'auto:N' or N, where N is an integer. Default 1.",
    )
    parser.add_argument(
        "--max_batch_size",
        type=int,
        default=None,
        metavar="N",
        help="Maximal batch size to try with --batch_size auto.",
    )
    parser.add_argument(
        "--device",
        type=str,
        default=None,
        help="Device to use (e.g. cuda, cuda:0, cpu).",
    )
    parser.add_argument(
        "--output_path",
        "-o",
        default=None,
        type=str,
        metavar="DIR|DIR/file.json",
        help="The path to the output file where the result metrics will be saved. If the path is a directory and log_samples is true, the results will be saved in the directory. Else the parent directory will be used.",
    )
    parser.add_argument(
        "--limit",
        "-L",
        type=float,
        default=None,
        metavar="N|0<N<1",
        help="Limit the number of examples per task. If <1, limit is a percentage of the total number of examples.",
    )
    # The remaining flags are all consumed by cli_evaluate() below; defaults follow the
    # upstream lm-evaluation-harness CLI.
    parser.add_argument(
        "--use_cache",
        "-c",
        type=str,
        default=None,
        metavar="DIR",
        help="A path to a sqlite db file for caching model responses. `None` if not caching.",
    )
    parser.add_argument(
        "--cache_requests",
        type=str,
        default=None,
        choices=["true", "refresh", "delete"],
        help="Speed up evaluation by caching the building of dataset requests. `None` if not caching.",
    )
    parser.add_argument(
        "--check_integrity",
        action="store_true",
        help="Whether to run the relevant part of the test suite for the tasks.",
    )
    parser.add_argument(
        "--write_out",
        "-w",
        action="store_true",
        default=False,
        help="Prints the prompt for the first few documents.",
    )
    parser.add_argument(
        "--log_samples",
        "-s",
        action="store_true",
        default=False,
        help="If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis. Use with --output_path.",
    )
    parser.add_argument(
        "--show_config",
        action="store_true",
        default=False,
        help="If True, shows the full config of all tasks at the end of the evaluation.",
    )
    parser.add_argument(
        "--include_path",
        type=str,
        default=None,
        metavar="DIR",
        help="Additional path to include if there are external tasks to include.",
    )
    parser.add_argument(
        "--gen_kwargs",
        type=str,
        default=None,
        help="String arguments for model generation on greedy_until tasks, e.g. `temperature=0,top_k=0,top_p=0`.",
    )
    parser.add_argument(
        "--verbosity",
        "-v",
        type=str.upper,
        default="INFO",
        metavar="CRITICAL|ERROR|WARNING|INFO|DEBUG",
        help="Controls the reported logging error level.",
    )
    parser.add_argument(
        "--wandb_args",
        type=str,
        default="",
        help="Comma separated string arguments passed to wandb.init, e.g. `project=lm-eval,job_type=eval`",
    )
    parser.add_argument(
        "--predict_only",
        "-x",
        action="store_true",
        default=False,
        help="Use with --log_samples. Only model outputs will be saved and metrics will not be evaluated.",
    )
    parser.add_argument(
        "--seed",
        type=partial(_int_or_none_list_arg_type, 3),
        default="0,1234,1234",
        help="Seed(s) for python's random, numpy and torch, in that order; a single value is applied to all three.",
    )
    parser.add_argument(
        "--trust_remote_code",
        action="store_true",
        help="Sets trust_remote_code to True to execute code to create HF Datasets from the Hub",
    )
    return parser


def parse_eval_args(parser: argparse.ArgumentParser) -> argparse.Namespace:
    check_argument_types(parser)
    return parser.parse_args()
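# Minimal programmatic-use sketch (illustrative; the override below is hypothetical):
# cli_evaluate() also accepts an externally constructed namespace, so a script can reuse
# the parser instead of going through the console entry point, e.g.:
#
#   parser = setup_parser()
#   args = parse_eval_args(parser)   # parses sys.argv
#   args.batch_size = "auto"         # hypothetical override before evaluation
#   cli_evaluate(args)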
def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
    if not args:
        # we allow for args to be passed externally, else we parse them ourselves
        parser = setup_parser()
        args = parse_eval_args(parser)

    if args.wandb_args:
        wandb_logger = WandbLogger(**simple_parse_args_string(args.wandb_args))
        # run = wandb.init(project='eval', group='exp1')

    eval_logger = utils.eval_logger
    eval_logger.setLevel(getattr(logging, f"{args.verbosity}"))
    eval_logger.info(f"Verbosity set to {args.verbosity}")
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.predict_only:
        args.log_samples = True
    if (args.log_samples or args.predict_only) and not args.output_path:
        raise ValueError(
            "Specify --output_path if providing --log_samples or --predict_only"
        )

    if args.include_path is not None:
        eval_logger.info(f"Including path: {args.include_path}")
    task_manager = TaskManager(args.verbosity, include_path=args.include_path)

    if args.limit:
        eval_logger.warning(
            " --limit SHOULD ONLY BE USED FOR TESTING. "
            "REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT."
        )

    # Resolve tasks: a directory of YAML configs, registered task names, or paths to
    # individual YAML files.
    if args.tasks is None:
        eval_logger.error("Need to specify task to evaluate.")
        sys.exit()
    elif args.tasks == "list":
        eval_logger.info(
            "Available Tasks:\n - {}".format("\n - ".join(task_manager.all_tasks))
        )
        sys.exit()
    else:
        if os.path.isdir(args.tasks):
            import glob

            task_names = []
            yaml_path = os.path.join(args.tasks, "*.yaml")
            for yaml_file in glob.glob(yaml_path):
                config = utils.load_yaml_config(yaml_file)
                task_names.append(config)
        else:
            task_list = args.tasks.split(",")
            task_names = task_manager.match_tasks(task_list)
            for task in [task for task in task_list if task not in task_names]:
                if os.path.isfile(task):
                    config = utils.load_yaml_config(task)
                    task_names.append(config)
            task_missing = [
                task for task in task_list if task not in task_names and "*" not in task
            ]  # we don't want errors if a wildcard ("*") task name was used

            if task_missing:
                missing = ", ".join(task_missing)
                eval_logger.error(
                    f"Tasks were not found: {missing}\n"
                    f"{utils.SPACING}Try `lm-eval --tasks list` for list of available tasks",
                )
                raise ValueError(
                    f"Tasks not found: {missing}. Try `lm-eval --tasks list` for list of available tasks, or '--verbosity DEBUG' to troubleshoot task registration issues."
                )

    if args.output_path:
        path = Path(args.output_path)
        # check if file or 'dir/results.json' exists
        if path.is_file():
            raise FileExistsError(f"File already exists at {path}")
        output_path_file = path.joinpath(DEFAULT_RESULTS_FILE)
        if output_path_file.is_file():
            eval_logger.warning(
                f"File {output_path_file} already exists. Results will be overwritten."
            )
        # if path json then get parent dir
        elif path.suffix in (".json", ".jsonl"):
            output_path_file = path
            path.parent.mkdir(parents=True, exist_ok=True)
            path = path.parent
        else:
            path.mkdir(parents=True, exist_ok=True)

    # Respect user's value passed in via CLI, otherwise default to True and add to comma-separated model args
    if args.trust_remote_code:
        os.environ["HF_DATASETS_TRUST_REMOTE_CODE"] = str(args.trust_remote_code)
        args.model_args = (
            args.model_args
            + f",trust_remote_code={os.environ['HF_DATASETS_TRUST_REMOTE_CODE']}"
        )

    eval_logger.info(f"Selected Tasks: {task_names}")

    request_caching_args = request_caching_arg_to_dict(
        cache_requests=args.cache_requests
    )

    results = evaluator.simple_evaluate(
        model=args.model,
        model_args=args.model_args,
        tasks=task_names,
        num_fewshot=args.num_fewshot,
        batch_size=args.batch_size,
        max_batch_size=args.max_batch_size,
        device=args.device,
        use_cache=args.use_cache,
        limit=args.limit,
        check_integrity=args.check_integrity,
        write_out=args.write_out,
        log_samples=args.log_samples,
        gen_kwargs=args.gen_kwargs,
        task_manager=task_manager,
        verbosity=args.verbosity,
        predict_only=args.predict_only,
        random_seed=args.seed[0],
        numpy_random_seed=args.seed[1],
        torch_random_seed=args.seed[2],
        **request_caching_args,
    )

    if results is not None:
        if args.log_samples:
            samples = results.pop("samples")
        dumped = json.dumps(
            results, indent=2, default=_handle_non_serializable, ensure_ascii=False
        )
        if args.show_config:
            print(dumped)

        batch_sizes = ",".join(map(str, results["config"]["batch_sizes"]))

        # Add W&B logging
        if args.wandb_args:
            try:
                wandb_logger.post_init(results)
                wandb_logger.log_eval_result()
                if args.log_samples:
                    wandb_logger.log_eval_samples(samples)
            except Exception as e:
                eval_logger.info(f"Logging to Weights and Biases failed due to {e}")

        if args.output_path:
            output_path_file.open("w", encoding="utf-8").write(dumped)

            if args.log_samples:
                for task_name, config in results["configs"].items():
                    output_name = "{}_{}".format(
                        re.sub(r"[\"<>:/\|\\?\*\[\]]+", "__", args.model_args),
                        task_name,
                    )
                    filename = path.joinpath(f"{output_name}.jsonl")
                    samples_dumped = json.dumps(
                        samples[task_name],
                        indent=2,
                        default=_handle_non_serializable,
                        ensure_ascii=False,
                    )
                    filename.write_text(samples_dumped, encoding="utf-8")

        print(
            f"{args.model} ({args.model_args}), gen_kwargs: ({args.gen_kwargs}), limit: {args.limit}, num_fewshot: {args.num_fewshot}, "
            f"batch_size: {args.batch_size}{f' ({batch_sizes})' if batch_sizes else ''}"
        )
        print(make_table(results))
        if "groups" in results:
            print(make_table(results, "groups"))

    if args.wandb_args:
        # Tear down wandb run once all the logging is done.
        wandb_logger.run.finish()


if __name__ == "__main__":
    cli_evaluate()
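# Example invocations (illustrative sketch; the task names below are placeholders, not
# defaults of this script, and the model args come from the --model_args help text):
#
#   lm-eval --tasks list
#   lm-eval --model hf \
#       --model_args pretrained=EleutherAI/pythia-160m,dtype=float32 \
#       --tasks lambada_openai,hellaswag \
#       --batch_size auto --output_path results/ --log_samples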