diff --git a/.gitattributes b/.gitattributes index 502cfb1cd79adc5dfbf9443277ac221aa0ce0890..f6a0ca5e6039fb62e6da0497952dcfff0b7c8833 100644 --- a/.gitattributes +++ b/.gitattributes @@ -128,3 +128,4 @@ lm-evaluation-harness/wandb/run-20240605_140919-mkdnls2x/run-mkdnls2x.wandb filt lm-evaluation-harness/wandb/run-20240605_093020-laxetjfu/run-laxetjfu.wandb filter=lfs diff=lfs merge=lfs -text lm-evaluation-harness/wandb/run-20240606_045127-vb760voe/run-vb760voe.wandb filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so filter=lfs diff=lfs merge=lfs -text diff --git a/lm-evaluation-harness/lm_eval/api/__init__.py b/lm-evaluation-harness/lm_eval/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lm-evaluation-harness/lm_eval/api/__pycache__/__init__.cpython-310.pyc b/lm-evaluation-harness/lm_eval/api/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2651e8cf9d1a468cc54035d017ab7b194bc8eaef Binary files /dev/null and b/lm-evaluation-harness/lm_eval/api/__pycache__/__init__.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/api/__pycache__/filter.cpython-310.pyc b/lm-evaluation-harness/lm_eval/api/__pycache__/filter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ddb4bc2c73042b1c1a1cb44c0b02a2c2813e687 Binary files /dev/null and b/lm-evaluation-harness/lm_eval/api/__pycache__/filter.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/api/__pycache__/instance.cpython-310.pyc b/lm-evaluation-harness/lm_eval/api/__pycache__/instance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..575c4b545df213514a76e57b9b17d395ef487d94 Binary files /dev/null and b/lm-evaluation-harness/lm_eval/api/__pycache__/instance.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/api/__pycache__/metrics.cpython-310.pyc b/lm-evaluation-harness/lm_eval/api/__pycache__/metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbf44641a6a467f6ec4b3d14f27069dc00ed3670 Binary files /dev/null and b/lm-evaluation-harness/lm_eval/api/__pycache__/metrics.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/api/__pycache__/model.cpython-310.pyc b/lm-evaluation-harness/lm_eval/api/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3d34457f1c2cbb1d1143796bac8d622cb13dcb4 Binary files /dev/null and b/lm-evaluation-harness/lm_eval/api/__pycache__/model.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/api/__pycache__/registry.cpython-310.pyc b/lm-evaluation-harness/lm_eval/api/__pycache__/registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec8d24ce2d5c50edeba7edbfbe59673889c3d261 Binary files /dev/null and b/lm-evaluation-harness/lm_eval/api/__pycache__/registry.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/api/__pycache__/samplers.cpython-310.pyc b/lm-evaluation-harness/lm_eval/api/__pycache__/samplers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd5f98c1efc9f3eccca74f7bb8141bd69a05a72c Binary files /dev/null and b/lm-evaluation-harness/lm_eval/api/__pycache__/samplers.cpython-310.pyc differ diff --git 
a/lm-evaluation-harness/lm_eval/api/__pycache__/task.cpython-310.pyc b/lm-evaluation-harness/lm_eval/api/__pycache__/task.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4bbcef25a84a6136a2fd5b7e408b678b5fddaff1 Binary files /dev/null and b/lm-evaluation-harness/lm_eval/api/__pycache__/task.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/api/filter.py b/lm-evaluation-harness/lm_eval/api/filter.py new file mode 100644 index 0000000000000000000000000000000000000000..8d9db6821724c497c4a27116a1238e3b8d32ae29 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/api/filter.py @@ -0,0 +1,56 @@ +from abc import ABC, abstractmethod +from dataclasses import dataclass +from typing import Callable, Iterable, List, Union + +from lm_eval.api.instance import Instance + + +class Filter(ABC): + """ + Filter classes operate on a per-task level. + They take all model outputs (`instance.resps` for all `task.instances`) + across all instances of a task, and perform operations. + In a single run, one can configure any number of separate filters or lists of filters. + + """ + + def __init__(self, **kwargs) -> None: + """ + Can define custom behavior here, if an individual instantiation of a Filter class should have state. + """ + + @abstractmethod + def apply(self, resps: Union[List, Iterable], docs: List[dict]) -> Iterable: + """ + Defines the operation to perform on a list of the `inst.resps` properties of `Instance` objects. + Should return the list of (filtered) response lists *in the same order as they were input*, e.g. + if pass in [<inst.resps 1>, <inst.resps 2>] should return + [<filtered resps 1>, <filtered resps 2>] + """ + return resps + + +@dataclass +class FilterEnsemble: + """ + FilterEnsemble creates a pipeline applying multiple filters. + Its intended usage is to stack multiple post-processing steps in order. + `task.apply_filters` should use a list of FilterEnsemble classes that it stores, to apply each + pipeline separately. + """ + + name: str + filters: List[Callable[[], Filter]] + + def apply(self, instances: List[Instance]) -> None: + resps, docs = zip(*((inst.resps, inst.doc) for inst in instances)) + resps, docs = list(resps), list(docs) + + for f in self.filters: + # apply filters in sequence + resps = f().apply(resps, docs) + + # add the end results after filtering to filtered_requests of their respective source instances. + # has key `self.name`: each FilterEnsemble applied in a given run should use a different name.
+ for inst, resp in zip(instances, resps): + inst.filtered_resps[self.name] = resp diff --git a/lm-evaluation-harness/lm_eval/api/instance.py b/lm-evaluation-harness/lm_eval/api/instance.py new file mode 100644 index 0000000000000000000000000000000000000000..d3c6afa0644e729ba441728c72a2469fdad07b8f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/api/instance.py @@ -0,0 +1,38 @@ +from dataclasses import dataclass, field +from typing import Literal, Optional, Tuple + + +OutputType = Literal[ + "loglikelihood", "loglikelihood_rolling", "generate_until", "multiple_choice" +] + + +@dataclass +class Instance: + request_type: OutputType + doc: dict + arguments: tuple + idx: int + metadata: Tuple[Optional[str], Optional[int], Optional[int]] = field( + default_factory=lambda: (None, None, None) + ) + resps: list = field(default_factory=list) + filtered_resps: dict = field(default_factory=dict) + + # initialized after init + task_name: Optional[str] = None + doc_id: Optional[int] = None + repeats: Optional[int] = None + + def __post_init__(self) -> None: + # unpack metadata field + self.task_name, self.doc_id, self.repeats = self.metadata + + @property + def args(self): + """ + Returns (string,) where `string` is the string to calculate loglikelihood over + """ + return ( + self.arguments if isinstance(self.arguments, tuple) else (self.arguments,) + ) diff --git a/lm-evaluation-harness/lm_eval/api/metrics.py b/lm-evaluation-harness/lm_eval/api/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..acc70234b1972d7b6d5d8e2cff7d5bd70470ace9 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/api/metrics.py @@ -0,0 +1,509 @@ +import logging +import math +import random +from collections.abc import Iterable +from typing import List + +import evaluate as hf_evaluate +import numpy as np +import sacrebleu +import sklearn.metrics + +from lm_eval.api.registry import register_aggregation, register_metric + + +eval_logger = logging.getLogger("lm-eval") + + +# Register Aggregations First +@register_aggregation("bypass") +def bypass_agg(arr): + return 999 + + +@register_aggregation("mean") +def mean(arr): + return sum(arr) / len(arr) + + +@register_aggregation("median") +def median(arr): + return arr[len(arr) // 2] + + +# Certain metrics must be calculated across all documents in a benchmark. +# We use them as aggregation metrics, paired with no-op passthrough metric fns. +@register_aggregation("perplexity") +def perplexity(items): + return math.exp(-mean(items)) + + +@register_aggregation("weighted_perplexity") +def weighted_perplexity(items): + return math.exp(-weighted_mean(items)) + + +@register_aggregation("bits_per_byte") +def bits_per_byte(items): + return -weighted_mean(items) / math.log(2) + + +@register_aggregation("f1") +def f1_score(items): + unzipped_list = list(zip(*items)) + golds = unzipped_list[0] + preds = unzipped_list[1] + fscore = sklearn.metrics.f1_score(golds, preds) + + return np.max(fscore) + + +@register_aggregation("matthews_corrcoef") +def matthews_corrcoef(items): + unzipped_list = list(zip(*items)) + golds = unzipped_list[0] + preds = unzipped_list[1] + # print(preds) + return sklearn.metrics.matthews_corrcoef(golds, preds) + + +@register_aggregation("bleu") +def bleu(items): + """The Bilingual Evaluation Understudy Score, or BLEU for short, is a metric + for evaluating a generated sentence to a reference sentence. 
It counts matching + n-grams in the candidate translation to n-grams in the reference text, where + 1-gram or unigram would be each token and a bigram comparison would be each + word pair. The comparison is made regardless of word order + Source: https://machinelearningmastery.com/calculate-bleu-score-for-text-python/ + Paper: https://www.aclweb.org/anthology/P02-1040/ + + Higher is better + """ + refs = list(zip(*items))[0] + preds = list(zip(*items))[1] + refs, preds = _sacreformat(refs, preds) + return sacrebleu.corpus_bleu(preds, refs).score + + +@register_aggregation("chrf") +def chrf(items): + """chrF++ is a tool for automatic evaluation of machine translation output + based on character n-gram precision and recall enhanced with word n-grams. + Source: https://github.com/m-popovic/chrF + Paper: https://www.aclweb.org/anthology/W15-3049.pdf + + Higher is better # TODO I think + """ + refs = list(zip(*items))[0] + preds = list(zip(*items))[1] + refs, preds = _sacreformat(refs, preds) + return sacrebleu.corpus_chrf(preds, refs).score + + +@register_aggregation("ter") +def ter(items): + """Translation Error Rate is an error metric for machine translation that + measures the number of edits required to change a system output into one + of the references + Source: http://www.cs.umd.edu/~snover/tercom/ + Paper: http://mt-archive.info/AMTA-2006-Snover.pdf + + Lower is better + """ + refs = list(zip(*items))[0] + preds = list(zip(*items))[1] + refs, preds = _sacreformat(refs, preds) + return sacrebleu.corpus_ter(preds, refs).score + + +@register_aggregation("brier_score") +def brier_score(items): # This is a passthrough function + gold, predictions = list(zip(*items)) + gold = list(gold) + gold_one_hot = np.eye(np.max(gold) + 1)[gold] + predictions = list(zip(*items))[1] + return np.mean(np.sum((predictions - gold_one_hot) ** 2, axis=1)) + + +@register_metric( + metric="brier_score", + higher_is_better=False, + output_type=["multiple_choice"], + aggregation="brier_score", +) +def brier_score_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="acc", + higher_is_better=True, + output_type=["loglikelihood", "multiple_choice"], + aggregation="mean", +) +def acc_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="acc_norm", + higher_is_better=True, + output_type=["loglikelihood", "multiple_choice"], + aggregation="mean", +) +def acc_norm_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="acc_mutual_info", + higher_is_better=True, + output_type="multiple_choice", + aggregation="mean", +) +def acc_mutual_info_fn(items): # This is a passthrough function + return items + + +exact_match = hf_evaluate.load("exact_match") + + +@register_metric( + metric="exact_match", + higher_is_better=True, + output_type="generate_until", + aggregation="mean", +) +def exact_match_fn(**kwargs): + return exact_match.compute(**kwargs) + + +@register_metric( + metric="perplexity", + higher_is_better=False, + output_type="loglikelihood", + aggregation="perplexity", +) +def perplexity_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="word_perplexity", + higher_is_better=False, + output_type="loglikelihood_rolling", + aggregation="weighted_perplexity", +) +def word_perplexity_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="byte_perplexity", + higher_is_better=False, + output_type="loglikelihood_rolling", + 
aggregation="weighted_perplexity", +) +def byte_perplexity_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="bits_per_byte", + higher_is_better=False, + output_type="loglikelihood_rolling", + aggregation="bits_per_byte", +) +def bits_per_byte_fn(items): # This is a passthrough function + return items + + +def pop_stddev(arr): + mu = mean(arr) + return math.sqrt(sum([(x - mu) ** 2 for x in arr]) / len(arr)) + + +def sample_stddev(arr): + mu = mean(arr) + return math.sqrt(sum([(x - mu) ** 2 for x in arr]) / (len(arr) - 1)) + + +def mean_stderr(arr): + return sample_stddev(arr) / math.sqrt(len(arr)) + + +@register_metric( + metric="bypass", + higher_is_better=True, + output_type=["loglikelihood", "multiple_choice", "generate_until"], + aggregation="bypass", +) +def bypass(items): + return None + + +@register_metric( + metric="mcc", + higher_is_better=True, + output_type="multiple_choice", + aggregation="matthews_corrcoef", +) +def mcc_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="f1", + higher_is_better=True, + output_type="multiple_choice", + aggregation="f1", +) +def f1_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="bleu", + higher_is_better=True, + output_type="generate_until", + aggregation="bleu", +) +def bleu_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="chrf", + higher_is_better=True, + output_type="generate_until", + aggregation="chrf", +) +def chrf_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="ter", + higher_is_better=True, + output_type="generate_until", + aggregation="ter", +) +def ter_fn(items): # This is a passthrough function + return items + + +@register_metric( + metric="acc_all", + higher_is_better=True, + output_type="loglikelihood", + aggregation="mean", +) +def acc_all(items): + # Only count as correct if all answers are labeled correctly for each question + question_scoring_dict = {} + preds = list(zip(*items))[0] + docs = list(zip(*items))[1] + + for doc, pred in zip(docs, preds): + paragraph_id = doc["idx"]["paragraph"] + question_id = doc["idx"]["question"] + if (paragraph_id, question_id) not in question_scoring_dict: + question_scoring_dict[(paragraph_id, question_id)] = [] + + gold_label = doc["label"] == 1 + + question_scoring_dict[(paragraph_id, question_id)].append(gold_label == pred) + acc = np.mean([int(all(x)) for x in question_scoring_dict.values()]) + return acc + + +def acc_all_stderr(items): + # Only count as correct if all answers are labeled correctly for each question + question_scoring_dict = {} + preds = list(zip(*items))[0] + docs = list(zip(*items))[1] + + for doc, pred in zip(docs, preds): + question_id = doc["idx"]["question"] + if question_id not in question_scoring_dict: + question_scoring_dict[question_id] = [] + + gold_label = doc["label"] == 1 + question_scoring_dict[question_id].append(gold_label == pred) + + acc = mean_stderr([int(all(x)) for x in question_scoring_dict.values()]) + return acc + + +def metric_max_over_ground_truths(metric_fn, prediction, ground_truths): + """Compute max metric between prediction and each ground truth.""" + scores_for_ground_truths = [] + for ground_truth in ground_truths: + score = metric_fn(prediction, ground_truth) + scores_for_ground_truths.append(score) + return max(scores_for_ground_truths) + + +def weighted_mean(items): + a, b = zip(*items) + return sum(a) / sum(b) + + +def 
is_non_str_iterable(obj): + return isinstance(obj, Iterable) and not isinstance(obj, str) + + +def _sacreformat(refs, preds): + """Format refs and preds for sacrebleu corpus calculation. It is very particular""" + # Sacrebleu expects (List[str], List[List[str]) + # e.g. sacrebleu.corpus_bleu([pred_t], [[ref1_stream], [ref2_stream], ...]) + + # Note [ref1_stream] is the first reference for each pred. + # So lists are size N and (M, N) for N preds and M possible refs for each pred + # This is a different order of dimensions that I would expect + + # We expect refs to be List[str] or List[List[str]], the outer list corresponding to preds + # Must become List[List[str]] with the inner list corresponding to preds + if not is_non_str_iterable(refs): + refs = list(refs) + if not is_non_str_iterable(refs[0]): + refs = [[ref] for ref in refs] + refs = list(zip(*refs)) + # Note the number of refs in each ref list much match the number of preds + + # We expect preds to be List[str] or List[List[str]]. Must become List[str] + if not is_non_str_iterable(preds): + preds = list(preds) + if is_non_str_iterable(preds[0]): + assert len(preds[0]) == 1, f"Pred must be a str, was {preds[0]}" + preds = [pred[0] for pred in preds] + + return refs, preds + + +# stderr stuff + + +class _bootstrap_internal: + def __init__(self, f, n) -> None: + self.f = f + self.n = n + + def __call__(self, v): + i, xs = v + rnd = random.Random() + rnd.seed(i) + res = [] + for _ in range(self.n): + res.append(self.f(rnd.choices(xs, k=len(xs)))) + return res + + +def bootstrap_stderr(f, xs, iters): + import multiprocessing as mp + + pool = mp.Pool(mp.cpu_count()) + # this gives a biased estimate of the stderr (i.e w/ the mean, it gives something + # equivalent to stderr calculated without Bessel's correction in the stddev. + # Unfortunately, I haven't been able to figure out what the right correction is + # to make the bootstrap unbiased - i considered multiplying by sqrt(n/(n-1)) but + # that would be ad-hoc and I can't prove that that would actually be an unbiased estimator) + # Thankfully, shouldn't matter because our samples are pretty big usually anyways + res = [] + chunk_size = min(1000, iters) + from tqdm import tqdm + + print("bootstrapping for stddev:", f.__name__) + for bootstrap in tqdm( + pool.imap( + _bootstrap_internal(f, chunk_size), + [(i, xs) for i in range(iters // chunk_size)], + ), + total=iters // chunk_size, + ): + # sample w replacement + res.extend(bootstrap) + + pool.close() + return sample_stddev(res) + + +def stderr_for_metric(metric, bootstrap_iters): + bootstrappable = [ + median, + matthews_corrcoef, + f1_score, + perplexity, + bleu, + chrf, + ter, + ] + + if metric in bootstrappable: + return lambda x: bootstrap_stderr(metric, x, iters=bootstrap_iters) + + stderr = {mean: mean_stderr, acc_all: acc_all_stderr} + + return stderr.get(metric, None) + + +def pooled_sample_stderr(stderrs: List[float], sizes: List[int]): + # Used to aggregate bootstrapped stderrs across subtasks in a group, + # when we are weighting by the size of each subtask. + # + + assert len(stderrs) == len(sizes) + + # formula source: https://en.wikipedia.org/wiki/Pooled_variance + # and: https://stats.stackexchange.com/a/4841331 + # this empirically seems to match running `stderr_for_metric` on all instances + # from the subtasks concatenated with each other. 
+ pooled_sample_var = ( + sum([(size - 1) * stderr**2 * size for size, stderr in zip(sizes, stderrs)]) + ) / (sum(sizes) - len(sizes)) + + return np.sqrt(pooled_sample_var / sum(sizes)) + + +def combined_sample_stderr(stderrs: List[float], sizes: List[int], metrics=None): + assert ( + metrics is not None + ), "Need to pass a list of each subtask's metric for this stderr aggregation" + assert len(stderrs) == len(sizes) and len(sizes) == len(metrics) + + # See https://github.com/EleutherAI/lm-evaluation-harness/pull/1390 for more documentation. + # This formula depends on sample means. + # removed because it seems to give erroneously huge stderrs for groupings of tasks + # and does not seem to match up with bootstrap-calculated stderrs for groups. + + ### don't use this unless a statistician has told you it's the right thing to do ### + + # accumulators: we'll aggregate pairwise N - 1 times + variance = stderrs[0] ** 2 + curr_size = sizes[0] + curr_score = metrics[0] + + for stderr, size, score in zip(stderrs[1:], sizes[1:], metrics[1:]): + curr_score = ((curr_score * curr_size) + (score * size)) / ( + curr_size + size + ) # NOTE: this assumes our aggregation fn is "mean" + + variance = ((curr_size - 1) * variance + (size - 1) * (stderr**2)) / ( + curr_size + size - 1 + ) + curr_size * size / ((curr_size + size) * (curr_size + size - 1)) * ( + curr_score - score + ) ** 2 + + return np.sqrt(variance) + + +def aggregate_subtask_metrics(metrics, sizes, weight_by_size=True): + # A helper function that is used to aggregate + # subtask scores cross-task. + # TODO: does not hold for non-mean aggregations + if not weight_by_size: + sizes = [1] * len(sizes) + + assert len(metrics) == len(sizes) + + return sum([metric * size for metric, size in zip(metrics, sizes)]) / sum(sizes) diff --git a/lm-evaluation-harness/lm_eval/api/model.py b/lm-evaluation-harness/lm_eval/api/model.py new file mode 100644 index 0000000000000000000000000000000000000000..a9c451ad2ea555296a0c8aee6c070ee903147f1c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/api/model.py @@ -0,0 +1,346 @@ +import abc +import hashlib +import json +import logging +import os +from typing import List, Optional, Tuple, Type, TypeVar + +import transformers +from sqlitedict import SqliteDict +from tqdm import tqdm + +from lm_eval import utils + + +eval_logger = logging.getLogger("lm-eval") + +T = TypeVar("T", bound="LM") + + +class LM(abc.ABC): + def __init__(self) -> None: + """Defines the interface that should be implemented by all LM subclasses. + LMs are assumed to take text (strings) as input and yield strings as output + (inputs/outputs should be tokenization-agnostic.) + + """ + # set rank and world size to a single process, by default. + self._rank = 0 + self._world_size = 1 + self.cache_hook = CacheHook(None) + + @abc.abstractmethod + def loglikelihood(self, requests) -> List[Tuple[float, bool]]: + """Compute log-likelihood of generating a continuation from a context. + Downstream tasks should attempt to use loglikelihood instead of other + LM calls whenever possible. + + :param requests: list[Instance] + A list of Instance objects, with property `args` which returns a tuple (context, continuation). + `context: str` + Context string. Implementations of LM must be able to handle an + empty context string. + `continuation: str` + The continuation over which log likelihood will be calculated. If + there is a word boundary, the space should be in the continuation. + For example, context="hello" continuation=" world" is correct. 
+ + :return: list[tuple[float, bool]] + A list of pairs (logprob, isgreedy) + `logprob: float` + The log probability of `continuation`. + `isgreedy`: + Whether `continuation` would be generated by greedy sampling from `context`. + """ + pass + + @abc.abstractmethod + def loglikelihood_rolling(self, requests) -> List[Tuple[float]]: + """Compute full log-likelihood of a string, with no truncation, for perplexity computation + - We will use the full max context length of the model. + - For inputs that exceed the max context length, we divide the tokenized string into chunks of up to + the max context length. + - IMPORTANT: Each document's loglikelihood/perplexity is computed *separately*, unlike other implementations + which may simply concatenate multiple documents together. + - IMPORTANT: We maximize the amount of context for each prediction. Specifically, for inputs that we break into + multiple chunks, the last input will still a full-sized context. + Example: + Input tokens: [ 0 1 2 3 4 5 6 7 8 9 ] + Prefix: BOS/EOS + Max context length: 4 + Resulting input/prediction pairs: + + INPUT: BOS 0 1 2 + PRED: 0 1 2 3 + + INPUT: 3 4 5 6 + PRED: 4 5 6 7 + + INPUT: 5 6 7 8 + PRED: 8 9 + + Observe that: + 1. Each token is predicted exactly once + 2. For the last pair, we provide the full context, but only score the last two tokens + + :param requests: list[Instance] + A list of Instance objects with property `args` which returns a tuple (context,). + string: str + String for which we are computing overall loglikelihood + :return: list[tuple[float]] + A list of tuples (logprob,) + logprob: float + The log probability of `context` conditioned on the BOS/EOS token. + Can also be overridden for custom cases by `prefix_token_id`. + """ + pass + + # TODO: Add an optional max length + @abc.abstractmethod + def generate_until(self, requests) -> List[str]: + """Generate greedily until a stopping sequence + + :param requests: list[Instance] + A list of Instance objects with property `args` which returns a tuple (context, until). + context: str + Context string + until: [str] + The string sequences to generate until. These string sequences + may each span across multiple tokens, or may be part of one token. + :return: list[str] + A list of strings continuation + continuation: str + The generated continuation. + """ + pass + + @classmethod + def create_from_arg_string( + cls: Type[T], arg_string: str, additional_config: Optional[dict] = None + ) -> T: + """ + Creates an instance of the LM class using the given argument string and additional config. + + Parameters: + - arg_string: A string containing arguments in the format key1=value1,key2=value2. + - additional_config: Optional dictionary containing additional configuration parameters. + + Returns: + - Instance of the LM class. + """ + additional_config = {} if additional_config is None else additional_config + args = utils.simple_parse_args_string(arg_string) + args2 = {k: v for k, v in additional_config.items() if v is not None} + return cls(**args, **args2) + + @classmethod + def create_from_arg_obj( + cls: Type[T], arg_dict: dict, additional_config: Optional[dict] = None + ) -> T: + """ + Creates an instance of the LM class using the given arg_obj + + Parameters: + - arg_obj: A dict containing arguments in the format key1=value1,key2=value2. + - additional_config: Optional dictionary containing additional configuration parameters. + + Returns: + - Instance of the LM class. 
+ """ + + additional_config = {} if additional_config is None else additional_config + additional_config = { + k: v for k, v in additional_config.items() if v is not None + } + + return cls(**arg_dict, **additional_config) + + @property + def rank(self): + # used in the case of parallelism. Hardcoded to + # ensure no errors arise using API models which do + # not support multi-device parallelism nor expect it. + return self._rank + + @property + def world_size(self): + # used in the case of parallelism. Hardcoded to + # ensure no errors arise using API models which do + # not support multi-device parallelism nor expect it. + return self._world_size + + def set_cache_hook(self, cache_hook) -> None: + self.cache_hook = cache_hook + + +### SQLite-based caching of LM responses +def hash_args(attr, args): + dat = json.dumps([attr] + list(args)) + return hashlib.sha256(dat.encode("utf-8")).hexdigest() + + +class CacheHook: + def __init__(self, cachinglm) -> None: + if cachinglm is None: + self.dbdict = None + return + + self.dbdict = cachinglm.dbdict + + def add_partial(self, attr, req, res) -> None: + if self.dbdict is None: + return + hsh = hash_args(attr, req) + self.dbdict[hsh] = res + + +class CachingLM: + def __init__(self, lm, cache_db) -> None: + """LM wrapper that returns cached results if they exist, and uses the underlying LM if not. + + :param lm: LM + Underlying LM + :param cache_db: str + Path to cache db + """ + self.lm = lm + self.cache_db = cache_db + if os.path.dirname(cache_db): + os.makedirs(os.path.dirname(cache_db), exist_ok=True) + self.dbdict = SqliteDict(cache_db, autocommit=True) + + # add hook to lm + lm.set_cache_hook(self.get_cache_hook()) + + def __getattr__(self, attr): + lm_attr = getattr(self.lm, attr) + if not callable(lm_attr): + return lm_attr + + def fn(requests): + res = [] + remaining_reqs = [] + warned = False + # figure out which ones are cached and which ones are new + eval_logger.info( + f"Loading '{attr}' responses from cache '{self.cache_db}' where possible..." + ) + for req in tqdm(requests, desc="Checking cached requests"): + hsh = hash_args(attr, req.args) + if attr == "generate_until" and req.args[1].get("do_sample", False): + # when we are doing non-greedy generation, don't use the cache + # (else every "randomly sampled" generation would be identical for repeats > 1). + if not warned: + eval_logger.warning( + f"Arguments to lm.generate_until() '{req.args[1]}' include non-deterministic sampling. Caching will not be performed for such requests." + ) + warned = True + res.append(None) + remaining_reqs.append(req) + elif hsh in self.dbdict: + ob = self.dbdict[hsh] + + assert ob is not None + + res.append(ob) + else: + res.append(None) + remaining_reqs.append(req) + eval_logger.info( + f"Cached requests: {len(requests) - len(remaining_reqs)}, Requests remaining: {len(remaining_reqs)}" + ) + # actually run the LM on the requests that do not have cached results + rem_res = getattr(self.lm, attr)(remaining_reqs) + + # stick the new ones back into the list and also cache any of the new ones + resptr = 0 + for req, r in zip(remaining_reqs, rem_res): + while res[resptr] is not None: + resptr += 1 + + res[resptr] = r + + # caching + hsh = hash_args(attr, req.args) + self.dbdict[hsh] = r + self.dbdict.commit() + + return res + + return fn + + def get_cache_hook(self): + return CacheHook(self) + + +class TemplateLM(LM): + """ + A class acting as intermediary between the LM base class + and boilerplate often included in other LM subclasses. 
+ """ + + @property + @abc.abstractmethod + def eot_token_id(self): + pass + + @property + def prefix_token_id(self): + # it is used as prefix for loglikelihood + return self.eot_token_id + + @abc.abstractmethod + def tok_encode(self, string: str, **kwargs): + pass + + @abc.abstractmethod + def _loglikelihood_tokens(self, requests, **kwargs): + pass + + def _encode_pair(self, context, continuation): + n_spaces = len(context) - len(context.rstrip()) + if n_spaces > 0: + continuation = context[-n_spaces:] + continuation + context = context[:-n_spaces] + + model_class = getattr(self, "AUTO_MODEL_CLASS", None) + + if model_class == transformers.AutoModelForSeq2SeqLM: + context_enc = self.tok_encode(context) + continuation_enc = self.tok_encode(continuation, add_special_tokens=False) + else: + whole_enc = self.tok_encode(context + continuation) + context_enc = self.tok_encode(context) + + context_enc_len = len(context_enc) + continuation_enc = whole_enc[context_enc_len:] + + return context_enc, continuation_enc + + def loglikelihood( + self, requests, disable_tqdm: bool = False + ) -> List[Tuple[float, bool]]: + new_reqs = [] + for context, continuation in [req.args for req in requests]: + if context == "": + # BOS or EOS as context + context_enc, continuation_enc = ( + [self.prefix_token_id], + self.tok_encode(continuation), + ) + else: + context_enc, continuation_enc = self._encode_pair(context, continuation) + + new_reqs.append(((context, continuation), context_enc, continuation_enc)) + + return self._loglikelihood_tokens(new_reqs, disable_tqdm=disable_tqdm) + + @abc.abstractmethod + def loglikelihood_rolling( + self, requests, disable_tqdm: bool = False + ) -> List[Tuple[float, bool]]: + pass + + @abc.abstractmethod + def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]: + pass diff --git a/lm-evaluation-harness/lm_eval/api/registry.py b/lm-evaluation-harness/lm_eval/api/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..6f1baa586803987932bf579bc82d11ca8960fa73 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/api/registry.py @@ -0,0 +1,172 @@ +import logging +from typing import Callable, Dict + +import evaluate as hf_evaluate + +from lm_eval.api.model import LM + + +eval_logger = logging.getLogger("lm-eval") + +MODEL_REGISTRY = {} + + +def register_model(*names): + # either pass a list or a single alias. + # function receives them as a tuple of strings + + def decorate(cls): + for name in names: + assert issubclass( + cls, LM + ), f"Model '{name}' ({cls.__name__}) must extend LM class" + + assert ( + name not in MODEL_REGISTRY + ), f"Model named '{name}' conflicts with existing model! Please register with a non-conflicting alias instead." + + MODEL_REGISTRY[name] = cls + return cls + + return decorate + + +def get_model(model_name): + try: + return MODEL_REGISTRY[model_name] + except KeyError: + raise ValueError( + f"Attempted to load model '{model_name}', but no model for this name found! Supported model names: {', '.join(MODEL_REGISTRY.keys())}" + ) + + +TASK_REGISTRY = {} +GROUP_REGISTRY = {} +ALL_TASKS = set() +func2task_index = {} + + +def register_task(name): + def decorate(fn): + assert ( + name not in TASK_REGISTRY + ), f"task named '{name}' conflicts with existing registered task!" 
+ + TASK_REGISTRY[name] = fn + ALL_TASKS.add(name) + func2task_index[fn.__name__] = name + return fn + + return decorate + + +def register_group(name): + def decorate(fn): + func_name = func2task_index[fn.__name__] + if name in GROUP_REGISTRY: + GROUP_REGISTRY[name].append(func_name) + else: + GROUP_REGISTRY[name] = [func_name] + ALL_TASKS.add(name) + return fn + + return decorate + + +OUTPUT_TYPE_REGISTRY = {} +METRIC_REGISTRY = {} +METRIC_AGGREGATION_REGISTRY = {} +AGGREGATION_REGISTRY: Dict[str, Callable[[], Dict[str, Callable]]] = {} +HIGHER_IS_BETTER_REGISTRY = {} + +DEFAULT_METRIC_REGISTRY = { + "loglikelihood": [ + "perplexity", + "acc", + ], + "loglikelihood_rolling": ["word_perplexity", "byte_perplexity", "bits_per_byte"], + "multiple_choice": ["acc", "acc_norm"], + "generate_until": ["exact_match"], +} + + +def register_metric(**args): + # TODO: do we want to enforce a certain interface to registered metrics? + def decorate(fn): + assert "metric" in args + name = args["metric"] + + for key, registry in [ + ("metric", METRIC_REGISTRY), + ("higher_is_better", HIGHER_IS_BETTER_REGISTRY), + ("aggregation", METRIC_AGGREGATION_REGISTRY), + ]: + if key in args: + value = args[key] + assert ( + value not in registry + ), f"{key} named '{value}' conflicts with existing registered {key}!" + + if key == "metric": + registry[name] = fn + elif key == "aggregation": + registry[name] = AGGREGATION_REGISTRY[value] + else: + registry[name] = value + + return fn + + return decorate + + +def get_metric(name: str, hf_evaluate_metric=False) -> Callable: + if not hf_evaluate_metric: + if name in METRIC_REGISTRY: + return METRIC_REGISTRY[name] + else: + eval_logger.warning( + f"Could not find registered metric '{name}' in lm-eval, searching in HF Evaluate library..." + ) + + try: + metric_object = hf_evaluate.load(name) + return metric_object.compute + except Exception: + eval_logger.error( + f"{name} not found in the evaluate library! Please check https://huggingface.co/evaluate-metric", + ) + + +def register_aggregation(name: str): + def decorate(fn): + assert ( + name not in AGGREGATION_REGISTRY + ), f"aggregation named '{name}' conflicts with existing registered aggregation!" + + AGGREGATION_REGISTRY[name] = fn + return fn + + return decorate + + +def get_aggregation(name: str) -> Callable[[], Dict[str, Callable]]: + try: + return AGGREGATION_REGISTRY[name] + except KeyError: + eval_logger.warning(f"{name} not a registered aggregation metric!") + + +def get_metric_aggregation(name: str) -> Callable[[], Dict[str, Callable]]: + try: + return METRIC_AGGREGATION_REGISTRY[name] + except KeyError: + eval_logger.warning(f"{name} metric is not assigned a default aggregation!") + + +def is_higher_better(metric_name) -> bool: + try: + return HIGHER_IS_BETTER_REGISTRY[metric_name] + except KeyError: + eval_logger.warning( + f"higher_is_better not specified for metric '{metric_name}'!" + ) diff --git a/lm-evaluation-harness/lm_eval/api/samplers.py b/lm-evaluation-harness/lm_eval/api/samplers.py new file mode 100644 index 0000000000000000000000000000000000000000..57e3a6f1a44d1a14a8156949eee3d5db28fd06b9 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/api/samplers.py @@ -0,0 +1,114 @@ +class ContextSampler: + def __init__(self, docs, task, fewshot_indices=None, rnd=None) -> None: + self.rnd = rnd + assert self.rnd, "must pass rnd to FewShotSampler!" 
+ + self.task = task + self.config = task._config + + self.target_delimiter = self.config.target_delimiter + self.fewshot_delimiter = self.config.fewshot_delimiter + + self.doc_to_text = self.task.doc_to_text + self.doc_to_target = self.task.doc_to_target + self.doc_to_choice = self.task.doc_to_choice + + self.docs = docs # HF dataset split, provided by task._fewshot_docs() + if fewshot_indices: # subset few-shot docs from + self.docs = self.docs.select(fewshot_indices) + + def get_context(self, doc, num_fewshot): + # draw an extra fewshot sample if using same split as evaluating on + n_samples = ( + num_fewshot + 1 + if self.config.fewshot_split == self.config.test_split + else num_fewshot + ) + + # draw `n_samples` docs from fewshot_docs + fewshotex = self.sample(n_samples) + + # get rid of the doc that's the one we're evaluating, if it's in the fewshot + # TODO: should we just stop people from using fewshot from same split as evaluating? + selected_docs = [x for x in fewshotex if x != doc][:num_fewshot] + + labeled_examples = ( + self.fewshot_delimiter.join( + [ + # TODO: is separating doc_to_text and doc_to_target by one space always desired? + ( + self.doc_to_text(doc) + if ( + self.config.doc_to_choice is None + or isinstance(self.doc_to_text(doc), str) + ) + else self.doc_to_choice(doc)[self.doc_to_text(doc)] + ) + + self.target_delimiter + + ( + str(self.doc_to_target(doc)[0]) + if isinstance(self.doc_to_target(doc), list) + else self.doc_to_target(doc) + if ( + self.config.doc_to_choice is None + or isinstance(self.doc_to_target(doc), str) + ) + else str(self.doc_to_choice(doc)[self.doc_to_target(doc)]) + ) + for doc in selected_docs + ] + ) + + self.fewshot_delimiter + ) + + return labeled_examples + + def sample(self, n): + """ + Draw `n` samples from our fewshot docs. This method should be overridden by subclasses. + """ + + return self.rnd.sample(self.docs, n) + + +class FirstNSampler(ContextSampler): + def sample(self, n) -> None: + """ + Draw the first `n` samples in order from the specified split. + Used for tasks with "canonical" ordered fewshot examples, such as MMLU and CMMLU. + """ + assert ( + n <= len(self.docs) + ), f"Error: number of fewshot samples requested exceeds the {len(self.docs)} that are available." + return self.docs[:n] + + +class BalancedSampler(ContextSampler): + def sample(self, n) -> None: + """ + TODO: this should return approximately class-balanced samples from our fewshot examples. + TODO: what order should they be in? maybe random? + """ + + pass + + +class ManualSampler(ContextSampler): + def sample(self, n) -> None: + """ """ + pass + + +SAMPLER_REGISTRY = { + "default": ContextSampler, + "first_n": FirstNSampler, +} + + +def get_sampler(name): + try: + return SAMPLER_REGISTRY[name] + except KeyError: + raise ValueError( + f"Attempted to use contextsampler '{name}', but no sampling strategy for this name found! 
Supported model names: {', '.join(SAMPLER_REGISTRY.keys())}" + ) diff --git a/lm-evaluation-harness/lm_eval/api/task.py b/lm-evaluation-harness/lm_eval/api/task.py new file mode 100644 index 0000000000000000000000000000000000000000..be40434a9d098c872c745a8027d5b355dd237b5f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/api/task.py @@ -0,0 +1,1498 @@ +import abc +import ast +import logging +import random +import re +from collections.abc import Callable +from copy import deepcopy +from dataclasses import asdict, dataclass +from inspect import getsource +from typing import ( + Any, + Dict, + Iterable, + Iterator, + List, + Literal, + Mapping, + Optional, + Tuple, + Union, +) + +import datasets +import numpy as np +from tqdm import tqdm + +from lm_eval import utils +from lm_eval.api import samplers +from lm_eval.api.instance import Instance, OutputType +from lm_eval.api.metrics import bits_per_byte, mean, weighted_perplexity +from lm_eval.api.registry import ( + AGGREGATION_REGISTRY, + DEFAULT_METRIC_REGISTRY, + get_aggregation, + get_metric, + get_metric_aggregation, + is_higher_better, +) +from lm_eval.caching.cache import load_from_cache, save_to_cache +from lm_eval.filters import build_filter_ensemble +from lm_eval.prompts import get_prompt + + +ALL_OUTPUT_TYPES = [ + "loglikelihood", + "multiple_choice", + "loglikelihood_rolling", + "generate_until", +] + +eval_logger = logging.getLogger("lm-eval") + + +@dataclass +class TaskConfig(dict): + # task naming/registry + task: Optional[str] = None + task_alias: Optional[str] = None + group: Optional[Union[str, list]] = None + group_alias: Optional[Union[str, list]] = None + # HF dataset options. + # which dataset to use, + # and what splits for what purpose + dataset_path: Optional[str] = None + dataset_name: Optional[str] = None + dataset_kwargs: Optional[dict] = None + training_split: Optional[str] = None + validation_split: Optional[str] = None + test_split: Optional[str] = None + fewshot_split: Optional[ + str + ] = None # TODO: assert that this not None if num_fewshot > 0. (?) assert if this is same split as one evaling (?) + # formatting / prompting options. + # see docs/advanced_task_guide.md for more info + process_docs: Optional[Callable] = None + doc_to_text: Optional[Union[Callable, str]] = None + doc_to_target: Optional[Union[Callable, str]] = None + doc_to_choice: Optional[Union[Callable, str, dict, list]] = None + process_results: Optional[Union[Callable, str]] = None + use_prompt: Optional[str] = None + description: str = "" + target_delimiter: str = " " + fewshot_delimiter: str = "\n\n" + fewshot_config: Optional[dict] = None + # runtime configuration options + num_fewshot: Optional[int] = None + # scoring options + metric_list: Optional[list] = None + output_type: OutputType = "generate_until" + generation_kwargs: Optional[dict] = None + repeats: int = 1 + filter_list: Optional[Union[str, list]] = None + should_decontaminate: bool = False + doc_to_decontamination_query: Optional[str] = None + metadata: Optional[ + dict + ] = None # by default, not used in the code. allows for users to pass arbitrary info to tasks + + def __post_init__(self) -> None: + if self.generation_kwargs is not None: + if self.output_type != "generate_until": + eval_logger.warning( + f"[{self.task}] passed `generation_kwargs`, but not using `output_type: generate_until`!" 
+ ) + + if "temperature" in self.generation_kwargs: + self.generation_kwargs["temperature"] = float( + self.generation_kwargs["temperature"] + ) + + if "until" not in self.generation_kwargs: + self.generation_kwargs["until"] = [self.fewshot_delimiter] + else: + if self.output_type == "generate_until": + # ensure that we greedily generate in absence of explicit arguments otherwise + self.generation_kwargs = { + "until": ( + None + if self.fewshot_delimiter is None + else [self.fewshot_delimiter] + ), + "do_sample": False, + } + + def __getitem__(self, item): + return getattr(self, item) + + def __setitem__(self, item, value): + return setattr(self, item, value) + + def to_dict(self, keep_callable: bool = False) -> dict: + """dumps the current config as a dictionary object, as a printable format. + null fields will not be printed. + Used for dumping results alongside full task configuration + + :return: dict + A printable dictionary version of the TaskConfig object. + + # TODO: should any default value in the TaskConfig not be printed? + """ + cfg_dict = asdict(self) + # remove values that are `None` + for k, v in list(cfg_dict.items()): + if v is None: + cfg_dict.pop(k) + elif k == "metric_list": + for metric_dict in v: + for metric_key, metric_value in metric_dict.items(): + if callable(metric_value): + metric_dict[metric_key] = self.serialize_function( + metric_value, keep_callable=keep_callable + ) + cfg_dict[k] = v + elif callable(v): + cfg_dict[k] = self.serialize_function(v, keep_callable=keep_callable) + return cfg_dict + + def serialize_function( + self, value: Union[Callable, str], keep_callable=False + ) -> Union[Callable, str]: + """Serializes a given function or string. + + If 'keep_callable' is True, the original callable is returned. + Otherwise, attempts to return the source code of the callable using 'getsource'. + """ + if keep_callable: + return value + else: + try: + return getsource(value) + except (TypeError, OSError): + return str(value) + + +class Task(abc.ABC): + """A task represents an entire benchmark including its dataset, problems, + answers, and evaluation methods. See BoolQ for a simple example implementation + + A `doc` can be any python object which represents one instance of evaluation. + This is usually a dictionary e.g. + {"question": ..., "answer": ...} or + {"question": ..., question, answer) + """ + + VERSION: Optional[Union[int, str]] = None + + # The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub + # or a path to a custom `datasets` loading script. + DATASET_PATH: Optional[str] = None + + # The name of a subset within `DATASET_PATH`. + DATASET_NAME: Optional[str] = None + + OUTPUT_TYPE: Optional[OutputType] = None + + def __init__( + self, + data_dir: Optional[str] = None, + cache_dir: Optional[str] = None, + download_mode: Optional[datasets.DownloadMode] = None, + config: Optional[Mapping] = None, # Union[dict, TaskConfig] + ) -> None: + """ + :param data_dir: str + Stores the path to a local folder containing the `Task`'s data files. + Use this to specify the path to manually downloaded data (usually when + the dataset is not publicly accessible). + :param cache_dir: str + The directory to read/write the `Task` dataset. 
This follows the + HuggingFace `datasets` API with the default cache directory located at: + `~/.cache/huggingface/datasets` + NOTE: You can change the cache location globally for a given process + to another directory: + `export HF_DATASETS_CACHE="/path/to/another/directory"` + :param download_mode: datasets.DownloadMode + How to treat pre-existing `Task` downloads and data. + - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS` + Reuse download and reuse dataset. + - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS` + Reuse download with fresh dataset. + - `datasets.DownloadMode.FORCE_REDOWNLOAD` + Fresh download and fresh dataset. + """ + self.download(data_dir, cache_dir, download_mode) + self._training_docs: Optional[list] = None + self._fewshot_docs: Optional[list] = None + self._instances: Optional[List[Instance]] = None + + self._config: TaskConfig = TaskConfig({**config}) if config else TaskConfig() + + self._filters = [build_filter_ensemble("none", [["take_first", None]])] + + def download( + self, + data_dir: Optional[str] = None, + cache_dir: Optional[str] = None, + download_mode=None, + ) -> None: + """Downloads and returns the task dataset. + Override this method to download the dataset from a custom API. + + :param data_dir: str + Stores the path to a local folder containing the `Task`'s data files. + Use this to specify the path to manually downloaded data (usually when + the dataset is not publicly accessible). + :param cache_dir: str + The directory to read/write the `Task` dataset. This follows the + HuggingFace `datasets` API with the default cache directory located at: + `~/.cache/huggingface/datasets` + NOTE: You can change the cache location globally for a given process + by setting the shell environment variable, `HF_DATASETS_CACHE`, + to another directory: + `export HF_DATASETS_CACHE="/path/to/another/directory"` + :param download_mode: datasets.DownloadMode + How to treat pre-existing `Task` downloads and data. + - `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS` + Reuse download and reuse dataset. + - `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS` + Reuse download with fresh dataset. + - `datasets.DownloadMode.FORCE_REDOWNLOAD` + Fresh download and fresh dataset. 
+ """ + self.dataset = datasets.load_dataset( + path=self.DATASET_PATH, + name=self.DATASET_NAME, + data_dir=data_dir, + cache_dir=cache_dir, + download_mode=download_mode, + ) + + @property + def config(self) -> TaskConfig: + """Returns the TaskConfig associated with this class.""" + return self._config + + @abc.abstractmethod + def has_training_docs(self): + """Whether the task has a training set""" + pass + + @abc.abstractmethod + def has_validation_docs(self): + """Whether the task has a validation set""" + pass + + @abc.abstractmethod + def has_test_docs(self): + """Whether the task has a test set""" + pass + + def training_docs(self) -> Iterable: + """ + :return: Iterable[obj] + A iterable of any object, that doc_to_text can handle + """ + return [] + + def validation_docs(self) -> Iterable: + """ + :return: Iterable[obj] + A iterable of any object, that doc_to_text can handle + """ + return [] + + def test_docs(self) -> Iterable: + """ + :return: Iterable[obj] + A iterable of any object, that doc_to_text can handle + """ + return [] + + def fewshot_docs(self) -> Iterable: + """ + :return: Iterable[obj] + A iterable of any object, that doc_to_text can handle + """ + if self.has_training_docs(): + return self.training_docs() + elif self.has_validation_docs(): + return self.validation_docs() + else: + eval_logger.warning( + f"[Task: {self.config.task}] has_training_docs and has_validation_docs are False" + ", using test_docs as fewshot_docs but this is not recommended." + ) + return self.test_docs() + + def _process_doc(self, doc: dict) -> dict: + """ + Override this to process (detokenize, strip, replace, etc.) individual + documents. This can be used in a map over documents of a data split. + E.g. `map(self._process_doc, self.dataset["validation"])` + + :return: dict + The processed version of the specified `doc`. + """ + return doc + + @property + def instances(self) -> List[Instance]: + """After calling `task.build_all_requests()`, tasks + maintain a list of the dataset instances which will be evaluated. + """ + return self._instances + + def fewshot_examples(self, k, rnd): + if self._training_docs is None: + self._training_docs = list(self.training_docs()) + + return rnd.sample(self._training_docs, k) + + def doc_to_decontamination_query(self, doc): + raise NotImplementedError( + "Override doc_to_decontamination_query with document specific decontamination query." 
+ ) + + @abc.abstractmethod + def doc_to_text(self, doc): + pass + + @abc.abstractmethod + def doc_to_target(self, doc): + pass + + def build_all_requests( + self, + *, + limit=None, + rank=None, + world_size=None, + cache_requests=False, + rewrite_requests_cache=False, + ) -> None: + """Build a set of Instances for a task, and store them in task.instances""" + + # used with caching + og_limit = limit + + cache_key = f"requests-{self._config.task}-{self.config.num_fewshot}shot-rank{rank}-world_size{world_size}" + + cached_instances = load_from_cache(file_name=cache_key) + + if cache_requests and cached_instances and not rewrite_requests_cache: + cached_instances = cached_instances[:limit] + + flattened_instances = [ + instance + for instance_group in cached_instances + for instance in instance_group + ] + + self._instances = flattened_instances + return + + eval_logger.info(f"Building contexts for {self.config.task} on rank {rank}...") + + instances = [] + + # process all documents when caching is specified for simplicity + if ( + cache_requests + and (not cached_instances or rewrite_requests_cache) + and limit is not None + ): + limit = None + + doc_id_docs = list( + self.doc_iterator(rank=rank, limit=limit, world_size=world_size) + ) + + num_docs = len(doc_id_docs) + + for doc_id, doc in tqdm( + doc_id_docs, + total=num_docs, + ): + # sample fewshot context #TODO: need to offset doc_id by rank now! + fewshot_ctx = self.fewshot_context( + doc, + 0 if self.config.num_fewshot is None else self.config.num_fewshot, + ) + + # TODO: we should override self.config.repeats if doing greedy gen so users don't waste time+compute + inst = self.construct_requests( + doc=doc, + ctx=fewshot_ctx, + metadata=(self.config["task"], doc_id, self.config.repeats), + ) + + if not isinstance(inst, list): + inst = [inst] + + instances.append(inst) + + # now flatten, this is to allow slicing to work with pickles + + sliced_instances = instances[:og_limit] + + flattened_instances = [ + instance + for instance_group in sliced_instances + for instance in instance_group + ] + + self._instances = flattened_instances + + if len(self._instances) == 0: + raise ValueError("task.build_requests() did not find any docs!") + + if cache_requests and (not cached_instances or rewrite_requests_cache): + save_to_cache(file_name=cache_key, obj=instances) + + @abc.abstractmethod + def construct_requests(self, doc, ctx, **kwargs): + """Uses RequestFactory to construct Requests and returns an iterable of + Requests which will be sent to the LM. + + :param doc: + The document as returned from training_docs, validation_docs, or test_docs. + :param ctx: str + The context string, generated by fewshot_context. This includes the natural + language description, as well as the few shot examples, and the question + part of the document for `doc`. + :param doc_idx: int + The index of a document within `self.test_docs()` or `self.validation_docs()`, + whichever is the main split used. + :param repeats: int + TODO: update this docstring + The number of times each instance in a dataset is inferred on. Defaults to 1, + can be increased for techniques like majority voting. + """ + pass + + @abc.abstractmethod + def process_results(self, doc, results): + """Take a single document and the LM results and evaluates, returning a + dict where keys are the names of submetrics and values are the values of + the metric for that one document + + :param doc: + The document as returned from training_docs, validation_docs, or test_docs. 
+ :param results: + The results of the requests created in construct_requests. + """ + pass + + @abc.abstractmethod + def aggregation(self): + """ + :returns: {str: [metric_score] -> float} + A dictionary where keys are the names of submetrics and values are + functions that aggregate a list of metric scores + """ + pass + + @abc.abstractmethod + def higher_is_better(self): + """ + :returns: {str: bool} + A dictionary where keys are the names of submetrics and values are + whether a higher value of the submetric is better + """ + pass + + def get_config(self, key: str) -> Any: + return getattr(self._config, key, None) + + @classmethod + def count_bytes(cls, doc): + """Used for byte-level perplexity metrics in rolling loglikelihood""" + return len(doc.encode("utf-8")) + + @classmethod + def count_words(cls, doc): + """Downstream loglikelihood_rolling perplexity tasks with custom word boundaries should override this!""" + return len(re.split(r"\s+", doc)) + + @utils.positional_deprecated + def fewshot_context( + self, + doc, + num_fewshot, + rnd=random.Random(1234), + description=None, + ): + """Returns a fewshot context string that is made up of a prepended description + (if provided), the `num_fewshot` number of examples, and an appended prompt example. + + :param doc: str + The document as returned from training_docs, validation_docs, or test_docs. + :param num_fewshot: int + The number of fewshot examples to provide in the returned context string. + :param rnd: random.Random + The pseudo-random number generator used to randomly sample examples. + WARNING: This is currently a required arg although it's optionalized with a default `None`. + :param description: str + The task's description that will be prepended to the fewshot examples. + :returns: str + The fewshot context. + """ + if rnd is None: + raise ValueError( + "A `random.Random` generator argument must be provided to `rnd`" + ) + + description = description if description else "" + + if num_fewshot == 0: + labeled_examples = "" + else: + # for sets with no training docs, draw from other set *but ensure no overlap with current doc* + if self.has_training_docs(): + fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd) + else: + if self._fewshot_docs is None: + self._fewshot_docs = list( + self.validation_docs() + if self.has_validation_docs() + else self.test_docs() + ) + + fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1) + + # get rid of the doc that's the one we're evaluating, if it's in the fewshot + fewshotex = [x for x in fewshotex if x != doc][:num_fewshot] + + labeled_examples = ( + "\n\n".join( + [ + self.doc_to_text(doc) + self.doc_to_target(doc) + for doc in fewshotex + ] + ) + + "\n\n" + ) + + example = self.doc_to_text(doc) + return description + labeled_examples + example + + def apply_filters(self) -> Optional[List[Instance]]: + """Iterates over FilterEnsembles and applies them to instances""" + if hasattr(self, "_filters"): + for f in self._filters: + f.apply(self._instances) + else: + eval_logger.warning("No filter defined, passing through instances") + return self._instances + + def dump_config(self) -> dict: + """Returns the config as a dictionary.""" + # TODO: this should only return the overrides applied to a non-YAML task's configuration. 
+ # (num_fewshot) + return self.config.to_dict() + + def set_config(self, key: str, value: Any, update: bool = False) -> None: + """Set or update the configuration for a given key.""" + if key is None: + raise ValueError("Key must be provided.") + + if update: + current_value = getattr(self._config, key, {}) + if not isinstance(current_value, dict): + raise TypeError( + f"Expected a dict for key '{key}', got {type(current_value).__name__} instead." + ) + current_value.update(value) + else: + setattr(self._config, key, value) + + def override_metric(self, metric_name: str) -> None: + """ + Override the default metrics used for evaluation with custom metrics. + + Parameters: + - metric_name (str): The name of the custom metric to override. Should be registered in api.metrics. + """ + ( + self._metric_fn_list, + self._aggregation_list, + self._metric_fn_kwargs, + self._higher_is_better, + ) = ({}, {}, {}, {}) + self._metric_fn_list[metric_name] = get_metric(metric_name) + self._aggregation_list[metric_name] = get_metric_aggregation(metric_name) + self._higher_is_better[metric_name] = is_higher_better(metric_name) + self._metric_fn_kwargs[metric_name] = {} + if not isinstance(self, ConfigurableTask): + self.process_results = lambda x, y: {metric_name: get_metric(metric_name)} + self.aggregation = lambda: { + metric_name: get_metric_aggregation(metric_name) + } + setattr(self._config, "metric_list", [{"metric": metric_name}]) + setattr(self._config, "process_results", None) + + @property + def eval_docs(self) -> Union[datasets.Dataset, List[dict]]: + if self.has_test_docs(): + return self.test_docs() + elif self.has_validation_docs(): + return self.validation_docs() + else: + raise ValueError( + f"Task dataset (path={self.DATASET_PATH}, name={self.DATASET_NAME}) must have valid or test docs!" 
+ ) + + def doc_iterator( + self, *, rank: int = 0, limit: Union[int, None] = None, world_size: int = 1 + ) -> Iterator[Tuple[int, Any]]: + limit = int(limit) if limit else None + doc_iterator = utils.create_iterator( + enumerate(self.eval_docs), + rank=int(rank), + limit=limit, + world_size=int(world_size), + ) + return doc_iterator + + +class ConfigurableTask(Task): + VERSION = "Yaml" + OUTPUT_TYPE = None + CONFIG = None + + def __init__( + self, + data_dir=None, + cache_dir=None, + download_mode=None, + config: Optional[dict] = None, + ) -> None: # TODO no super() call here + # Get pre-configured attributes + self._config = self.CONFIG + + # Use new configurations if there was no preconfiguration + if self.config is None: + self._config = TaskConfig(**config) + # Overwrite configs + else: + if config is not None: + self._config.__dict__.update(config) + + if self.config is None: + raise ValueError( + "Must pass a config to ConfigurableTask, either in cls.CONFIG or `config` kwarg" + ) + + if isinstance(self.config.metadata, dict): + if "version" in self.config.metadata: + self.VERSION = self.config.metadata["version"] + + if self.config.output_type is not None: + if self.config.output_type not in ALL_OUTPUT_TYPES: + raise ValueError( + f"Got invalid output_type '{self.config.output_type}', must be in '{','.join(ALL_OUTPUT_TYPES)}'" + ) + self.OUTPUT_TYPE = self.config.output_type + + if self.config.dataset_path is not None: + self.DATASET_PATH = self.config.dataset_path + + if self.config.dataset_name is not None: + self.DATASET_NAME = self.config.dataset_name + + self._metric_fn_list = {} + self._metric_fn_kwargs = {} + self._aggregation_list = {} + self._higher_is_better = {} + + if self.config.metric_list is None: + # TODO: handle this in TaskConfig.__post_init__ ? + _metric_list = DEFAULT_METRIC_REGISTRY[self.config.output_type] + + for metric_name in _metric_list: + self._metric_fn_list[metric_name] = get_metric(metric_name) + self._metric_fn_kwargs[metric_name] = {} + self._aggregation_list[metric_name] = get_metric_aggregation( + metric_name + ) + self._higher_is_better[metric_name] = is_higher_better(metric_name) + else: + for metric_config in self.config.metric_list: + if "metric" not in metric_config: + raise ValueError( + "'metric' key not provided for an entry in 'metric_list', must be specified!" 
+ ) + metric_name = metric_config["metric"] + kwargs = { + key: metric_config[key] + for key in metric_config + if key + not in ["metric", "aggregation", "higher_is_better", "hf_evaluate"] + } + hf_evaluate_metric = ( + "hf_evaluate" in metric_config + and metric_config["hf_evaluate"] is True + ) + + if self.config.process_results is not None: + self._metric_fn_list[metric_name] = None + self._metric_fn_kwargs[metric_name] = {} + elif callable(metric_name): + metric_fn = metric_name.__call__ + metric_name = metric_name.__name__ + self._metric_fn_list[metric_name] = metric_fn + self._metric_fn_kwargs[metric_name] = kwargs + else: + self._metric_fn_list[metric_name] = get_metric( + metric_name, hf_evaluate_metric + ) + self._metric_fn_kwargs[metric_name] = kwargs + + if "aggregation" in metric_config: + agg_name = metric_config["aggregation"] + if isinstance(agg_name, str): + self._aggregation_list[metric_name] = get_aggregation(agg_name) + elif callable(agg_name): # noqa: E721 + self._aggregation_list[metric_name] = metric_config[ + "aggregation" + ] + else: + INV_AGG_REGISTRY = {v: k for k, v in AGGREGATION_REGISTRY.items()} + metric_agg = get_metric_aggregation(metric_name) + eval_logger.warning( + f"[Task: {self.config.task}] metric {metric_name} is defined, but aggregation is not. " + f"using default " + f"aggregation={INV_AGG_REGISTRY[metric_agg]}" + ) + self._aggregation_list[metric_name] = metric_agg + + if "higher_is_better" in metric_config: + self._higher_is_better[metric_name] = metric_config[ + "higher_is_better" + ] + else: + eval_logger.warning( + f"[Task: {self.config.task}] metric {metric_name} is defined, but higher_is_better is not. " + f"using default " + f"higher_is_better={is_higher_better(metric_name)}" + ) + self._higher_is_better[metric_name] = is_higher_better(metric_name) + + self.download(self.config.dataset_kwargs) + self._training_docs = None + self._fewshot_docs = None + + if self.config.filter_list is not None: + self._filters = [] + for filter_config in self.config.filter_list: + filter_name = filter_config["name"] + filter_functions = filter_config["filter"] + components = [] + for function in filter_functions: + kwargs = { + key: function[key] for key in function if key != "function" + } + components.append([function["function"], kwargs]) + filter_pipeline = build_filter_ensemble(filter_name, components) + self._filters.append(filter_pipeline) + else: + self._filters = [build_filter_ensemble("none", [["take_first", None]])] + + if self.config.use_prompt is not None: + eval_logger.info(f"loading prompt {self.config.use_prompt}") + self.prompt = get_prompt( + self.config.use_prompt, self.DATASET_PATH, self.DATASET_NAME + ) + else: + self.prompt = None + + if self.fewshot_docs() is not None: + self.sampler = samplers.get_sampler( + self.config.fewshot_config.get("sampler", "default") + if self.config.fewshot_config + else "default" + )(list(self.fewshot_docs()), self, rnd=random.Random(1234)) + + self.task_docs = self.eval_docs + + # Test One Doc + self.features = list(self.task_docs.features.keys()) + self.multiple_input = 0 + self.multiple_target = 0 + test_doc = self.task_docs[0] + test_text = self.doc_to_text(test_doc) + test_target = self.doc_to_target(test_doc) + + if self.config.doc_to_choice is not None: + test_choice = self.doc_to_choice(test_doc) + if not isinstance(test_choice, list): + eval_logger.error("doc_to_choice must return list") + else: + num_choice = len(test_choice) + + if isinstance(test_text, int): + self.multiple_input = num_choice + 
else: + test_choice = None + + if isinstance(test_target, list): + self.multiple_target = len(test_target) + else: + if (isinstance(test_target, int)) and (test_choice is not None): + test_target = test_choice[test_target] + else: + test_target = str(test_target) + + if test_choice is not None: + check_choices = test_choice + else: + check_choices = [test_target] + if self.config.doc_to_choice is not None: + for choice in check_choices: + choice_has_whitespace = True if choice[0].isspace() else False + delimiter_has_whitespace = ( + True + if self.config.target_delimiter.rstrip() + != self.config.target_delimiter + else False + ) + + if delimiter_has_whitespace and choice_has_whitespace: + eval_logger.debug( + f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" have whitespace' + ) + elif (not delimiter_has_whitespace) and (not choice_has_whitespace): + eval_logger.debug( + f'Both target_delimiter "{self.config.target_delimiter}" and target choice: "{choice}" do not have whitespace, ignore if the language you are evaluating on does not require/use whitespace' + ) + + def download(self, dataset_kwargs: Optional[Dict[str, Any]] = None) -> None: + self.dataset = datasets.load_dataset( + path=self.DATASET_PATH, + name=self.DATASET_NAME, + **dataset_kwargs if dataset_kwargs is not None else {}, + ) + + def has_training_docs(self) -> bool: + if self.config.training_split is not None: + return True + else: + return False + + def has_validation_docs(self) -> bool: + if self.config.validation_split is not None: + return True + else: + return False + + def has_test_docs(self) -> bool: + if self.config.test_split is not None: + return True + else: + return False + + def training_docs(self) -> datasets.Dataset: + if self.has_training_docs(): + if self.config.process_docs is not None: + return self.config.process_docs( + self.dataset[self.config.training_split] + ) + return self.dataset[self.config.training_split] + + def validation_docs(self) -> datasets.Dataset: + if self.has_validation_docs(): + if self.config.process_docs is not None: + return self.config.process_docs( + self.dataset[self.config.validation_split] + ) + return self.dataset[self.config.validation_split] + + def test_docs(self) -> datasets.Dataset: + if self.has_test_docs(): + if self.config.process_docs is not None: + return self.config.process_docs(self.dataset[self.config.test_split]) + return self.dataset[self.config.test_split] + + def fewshot_docs(self): + if self.config.fewshot_split is not None: + if self.config.process_docs is not None: + return self.config.process_docs(self.dataset[self.config.fewshot_split]) + return self.dataset[self.config.fewshot_split] + else: + if (self.config.num_fewshot is not None) and (self.config.num_fewshot > 0): + eval_logger.warning( + f"Task '{self.config.task}': " + "num_fewshot > 0 but fewshot_split is None. " + "using preconfigured rule." + ) + return super().fewshot_docs() + + @utils.positional_deprecated + def fewshot_context(self, doc: str, num_fewshot: int) -> str: + """Returns a fewshot context string that is made up of a prepended description + (if provided), the `num_fewshot` number of examples, and an appended prompt example. + + :param doc: str + The document as returned from training_docs, validation_docs, or test_docs. + :param num_fewshot: int + The number of fewshot examples to provide in the returned context string. + :returns: str + The fewshot context. 
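+
+        Note: if the task uses multiple inputs, only the description and fewshot
+        examples are returned here; each answer choice is appended to this context
+        later, in `construct_requests`.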
+ """ + if description := self.config.description: + description = utils.apply_template(self.config.description, doc) + + if num_fewshot == 0: + # always prepend the (possibly empty) task description + labeled_examples = description + else: + labeled_examples = description + self.sampler.get_context(doc, num_fewshot) + + example = self.doc_to_text(doc) + if self.multiple_input: + return labeled_examples + else: + if isinstance(example, str): + return labeled_examples + example + elif isinstance(example, list): + return [labeled_examples + ex for ex in example] + elif isinstance(example, int): + if self.config.doc_to_choice is not None: + choices = self.doc_to_choice(doc) + return labeled_examples + choices[example] + else: + return labeled_examples + str(example) + + def apply_filters(self): + """Iterates over FilterEnsembles and applies them to instances""" + if hasattr(self, "_filters"): + for f in self._filters: + f.apply(self._instances) + else: + eval_logger.warning("No filter defined, passing through instances") + return self._instances + + def should_decontaminate(self): + return self.config.should_decontaminate + + def doc_to_decontamination_query(self, doc): + if self.config.should_decontaminate: + if self.config.doc_to_decontamination_query is None: + return self.doc_to_text(doc) + else: + doc_to_decontamination_query = self.config.doc_to_decontamination_query + if doc_to_decontamination_query in self.features: + return doc[doc_to_decontamination_query] + elif callable(doc_to_decontamination_query): + return doc_to_decontamination_query(doc) + else: + return ast.literal_eval( + utils.apply_template( + self.config.doc_to_decontamination_query, doc + ) + ) + + def _process_doc(self, doc: dict) -> dict: + """ + Override this to process (detokenize, strip, replace, etc.) individual + documents. This can be used in a map over documents of a data split. + E.g. `map(self._process_doc, self.dataset["validation"])` + + :return: dict + The processed version of the specified `doc`. 
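+
+        By default this is a no-op and returns `doc` unchanged.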
+ """ + return doc + + def doc_to_text(self, doc): + if self.prompt is not None: + doc_to_text = self.prompt + else: + doc_to_text = self.config.doc_to_text + + if isinstance(doc_to_text, int): + return doc_to_text + elif isinstance(doc_to_text, str): + if doc_to_text in self.features: + # if self.config.doc_to_choice is not None: + # return self.doc_to_choice(doc)[doc[doc_to_text]] + # else: + return doc[doc_to_text] + else: + text_string = utils.apply_template(doc_to_text, doc) + if text_string.isdigit() and self._config.doc_to_choice is not None: + return ast.literal_eval(text_string) + else: + return text_string + elif callable(doc_to_text): + return doc_to_text(doc) + # Used when applying a Promptsource template + elif hasattr(doc_to_text, "apply"): + applied_prompt = doc_to_text.apply(doc) + if len(applied_prompt) == 2: + return applied_prompt[0] + else: + eval_logger.warning("Applied prompt returns empty string") + return self.config.fewshot_delimiter + else: + print(type(doc_to_text)) + raise TypeError + + def doc_to_target(self, doc: Mapping) -> Union[int, str, list]: + if self.prompt is not None: + doc_to_target = self.prompt + else: + doc_to_target = self.config.doc_to_target + + if isinstance(doc_to_target, int): + return doc_to_target + elif isinstance(doc_to_target, str): + if doc_to_target in self.features: + # if self.config.doc_to_choice is not None: + # return self.doc_to_choice(doc)[doc[doc_to_target]] + # else: + return doc[doc_to_target] + else: + target_string = utils.apply_template(doc_to_target, doc) + if target_string.isdigit() and self._config.doc_to_choice is not None: + return ast.literal_eval(target_string) + elif ( + len(target_string) >= 2 + and (target_string[0] == "[") + and (target_string[-1] == "]") + ): + try: + return ast.literal_eval(target_string) + except (SyntaxError, ValueError): + return target_string + else: + return target_string + elif isinstance(doc_to_target, list): + return doc_to_target + elif callable(doc_to_target): + return doc_to_target(doc) + # Used when applying a Promptsource template + elif hasattr(doc_to_target, "apply"): + applied_prompt = doc_to_target.apply(doc) + if len(applied_prompt) == 2: + return applied_prompt[1] + else: + eval_logger.warning("Applied prompt returns empty string") + return self.config.fewshot_delimiter + else: + raise TypeError + + def doc_to_choice(self, doc: Any) -> List[str]: + if self.prompt is not None: + doc_to_choice = self.prompt + elif self.config.doc_to_choice is None: + eval_logger.error("doc_to_choice was called but not set in config") + else: + doc_to_choice = self.config.doc_to_choice + + if isinstance(doc_to_choice, str): + if doc_to_choice in self.features: + return doc[doc_to_choice] + else: + return ast.literal_eval(utils.apply_template(doc_to_choice, doc)) + elif isinstance(doc_to_choice, list): + return doc_to_choice + elif isinstance(doc_to_choice, dict): + return list(doc_to_choice.values()) + elif callable(doc_to_choice): + return doc_to_choice(doc) + elif hasattr(doc_to_choice, "get_answer_choices_list"): + return doc_to_choice.get_answer_choices_list(doc) + else: + raise TypeError + + def construct_requests( + self, doc: dict, ctx: str, **kwargs + ) -> Union[List[Instance], Instance]: + if self.OUTPUT_TYPE == "loglikelihood": + arguments = (ctx, self.doc_to_target(doc)) + elif self.OUTPUT_TYPE == "loglikelihood_rolling": + arguments = (self.doc_to_target(doc),) + elif self.OUTPUT_TYPE == "multiple_choice": + choices = self.doc_to_choice(doc) + target_delimiter = 
self.config.target_delimiter + if self.multiple_input: + # If there are multiple inputs, choices are placed in the ctx + cont = self.doc_to_target(doc) + arguments = [ + (ctx + choice, f"{target_delimiter}{cont}") for choice in choices + ] + else: + # Otherwise they are placed in the continuation + arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices] + + request_list = [ + Instance( + request_type="loglikelihood", + doc=doc, + arguments=arg, + idx=i, + **kwargs, + ) + for i, arg in enumerate(arguments) + ] + # TODO: we should raise a warning telling users this will at most ~2x runtime. + if "acc_mutual_info" in self._metric_fn_list.keys(): + # if we are calculating multiple choice accuracy + # using mutual information instead of raw loglikelihood as metric, need unconditional lls. + + # here mutual info refers to calculating + # log(P(choice|ctx) / P(choice)) = log(P(choice|ctx)) - log(P(choice)) + # in other words normalizing by subtracting the unconditional logprob of each choice. + request_list.extend( + [ + Instance( + request_type="loglikelihood", + doc=doc, + arguments=("", "{}".format(choice)), + idx=i, + **kwargs, + ) + for i, choice in enumerate(choices) + ] + ) + return request_list + + elif self.OUTPUT_TYPE == "generate_until": + arguments = (ctx, deepcopy(self.config.generation_kwargs)) + + return Instance( + request_type=self.OUTPUT_TYPE, doc=doc, arguments=arguments, idx=0, **kwargs + ) + + def process_results(self, doc, results): + if callable(self.config.process_results): + return self.config.process_results(doc, results) + + result_dict = {} + use_metric = list(self._metric_fn_list.keys()) + if self.OUTPUT_TYPE == "loglikelihood": + results = results[0] + ll, is_greedy = results + return { + **({"perplexity": ll} if "perplexity" in use_metric else {}), + **({"acc": int(is_greedy)} if "acc" in use_metric else {}), + } + elif self.OUTPUT_TYPE == "loglikelihood_rolling": + (loglikelihood,) = results + _words = self.count_words(self.doc_to_target(doc)) + _bytes = self.count_bytes(self.doc_to_target(doc)) + return { + **( + {"word_perplexity": (loglikelihood, _words)} + if "word_perplexity" in use_metric + else {} + ), + **( + {"byte_perplexity": (loglikelihood, _bytes)} + if "byte_perplexity" in use_metric + else {} + ), + **( + {"bits_per_byte": (loglikelihood, _bytes)} + if "bits_per_byte" in use_metric + else {} + ), + } + elif self.OUTPUT_TYPE == "multiple_choice": + lls, is_greedy = zip(*results) + + # retrieve choices in List[str] form, to compute choice lengths, etc. + choices = self.doc_to_choice(doc) + completion_len = np.array([float(len(i)) for i in choices]) + + if ( + 2 * len(choices) == len(lls) + and "acc_mutual_info" in self._metric_fn_list.keys() + ): + # then we are doing mutual info. 
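+                # `lls` is expected to interleave conditional and unconditional entries
+                # per choice, hence the [::2] / [1::2] slices below.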
+ # this stores the "dryrun" / unconditional answer loglikelihoods + lls_unconditional = lls[1::2] + if len(lls_unconditional) != len(choices): + raise ValueError + # and this stores our "regular" conditional loglikelihoods + lls = lls[::2] + + pred = np.argmax(lls) + pred_norm = np.argmax(lls / completion_len) + + if self.multiple_input: + gold = self.doc_to_text(doc) + else: + gold = self.doc_to_target(doc) + + gold_index_error = False + if isinstance(gold, list): + gold = [i if i < len(choices) else -100 for i in gold] + if -100 in gold: + gold_index_error = True + else: + if isinstance(gold, int): + gold = gold if gold < len(choices) else -100 + elif isinstance(gold, str): + gold = choices.index(gold) if gold in choices else -100 + + if gold == -100: + gold_index_error = True + + if gold_index_error: + eval_logger.warning( + f"Label index was not in within range of available choices," + f"Sample:\n\n{doc}\n\n" + ) + + if self.multiple_target: + acc = 1.0 if pred in gold else 0.0 + acc_norm = 1.0 if pred_norm in gold else 0.0 + exact_match = int(any([is_greedy[i] if i != -100 else 0 for i in gold])) + else: + acc = 1.0 if pred == gold else 0.0 + acc_norm = 1.0 if pred_norm == gold else 0.0 + # TODO: this gets score of 0 on arc_challenge for pythia-70m. need to test that this works properly + exact_match = int(is_greedy[gold]) if gold != -100 else 0 + + prob_norm = utils.softmax(lls) + + # TODO use keyword arguments to the metric? + # gold, pred, norm stuff, the original lls, + result_dict = { + **({"acc": acc} if "acc" in use_metric else {}), + **({"f1": (gold, pred)} if "f1" in use_metric else {}), + **({"mcc": (gold, pred)} if "mcc" in use_metric else {}), + **({"acc_norm": acc_norm} if "acc_norm" in use_metric else {}), + **({"exact_match": exact_match} if "exact_match" in use_metric else {}), + **( + {"brier_score": (gold, prob_norm)} + if "brier_score" in use_metric + else {} + ), + } + + if "acc_mutual_info" in use_metric: + lls_mutual_info = [ + ll_c - ll_u for ll_c, ll_u in zip(lls, lls_unconditional) + ] + acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0 + result_dict["acc_mutual_info"] = acc_mutual_info + + elif self.OUTPUT_TYPE == "generate_until": + gold = self.doc_to_target(doc) + result = results[0] + if self.config.doc_to_choice is not None: + # If you set doc_to_choice, + # it assumes that doc_to_target returns a number. + choices = self.doc_to_choice(doc) + gold = choices[gold] + # we expect multiple_targets to be a list. 
+ elif self.multiple_target: + gold = list(gold) + elif type(gold) != type(result): + # cast gold to the same type as result + gold = type(result)(gold) + + for metric in self._metric_fn_list.keys(): + if self.multiple_target: + # in the case where we have multiple targets, + # return true if any are true + # TODO: this may break for multipLe_target, non zero-or-1 metrics + scores = [] + if not isinstance(gold, list): + # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer + # print(gold) + gold = [gold] + if metric == "exact_match": + result = [result for _ in range(len(gold))] + scores = self._metric_fn_list[metric]( + references=gold, + predictions=result, + **self._metric_fn_kwargs[metric], + )[metric] + result_score = 1.0 if scores > 0.0 else 0.0 + else: + for gold_option in gold: + try: + result_score = self._metric_fn_list[metric]( + references=[gold_option], + predictions=[result], + **self._metric_fn_kwargs[metric], + ) + except ( + TypeError + ): # TODO: this is hacky and I don't want to do it + result_score = self._metric_fn_list[metric]( + [gold_option, result] + ) + if isinstance(result_score, dict): + # TODO: this handles the case where HF evaluate returns a dict. + result_score = result_score[metric] + scores.append(result_score) + if any(scores): + result_score = 1.0 + else: + result_score = 0.0 + else: + try: + result_score = self._metric_fn_list[metric]( + references=[gold], + predictions=[result], + **self._metric_fn_kwargs[metric], + ) + except TypeError: # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics + result_score = self._metric_fn_list[metric]([gold, result]) + if isinstance(result_score, dict): + # TODO: this handles the case where HF evaluate returns a dict. + result_score = result_score[metric] + result_dict[metric] = result_score + else: + raise ValueError( + f"Passed invalid output_type '{self.OUTPUT_TYPE}' ! Please use one of ", + "'loglikelihood', 'loglikelihood_rolling', 'generate_until' or 'multiple_choice'", + ) + + return result_dict + + def aggregation(self) -> dict: + return self._aggregation_list + + def higher_is_better(self) -> dict: + return self._higher_is_better + + def get_config(self, key: str) -> Any: + return getattr(self._config, key, None) + + def __repr__(self): + return ( + f"ConfigurableTask(task_name={getattr(self.config, 'task', None)}," + f"group_name={getattr(self.config, 'group', None)}," + f"output_type={self.OUTPUT_TYPE}," + f"num_fewshot={getattr(self.config, 'num_fewshot', None)}," + f"num_samples={len(self.eval_docs)})" + ) + + +class MultipleChoiceTask(Task): + OUTPUT_TYPE = "loglikelihood" + + def doc_to_target(self, doc: dict) -> str: + return " " + doc["choices"][doc["gold"]] + + def construct_requests(self, doc: dict, ctx: str, **kwargs) -> List[Instance]: + # TODO: add mutual info here? + return [ + Instance( + request_type="loglikelihood", + doc=doc, + arguments=(ctx, " {}".format(choice)), + idx=i, + **kwargs, + ) + for i, choice in enumerate(doc["choices"]) + ] + + def process_results(self, doc: dict, results: Iterable[Tuple[float, bool]]) -> dict: + results = [ + res[0] for res in results + ] # only retain loglikelihoods, discard is_greedy TODO: do we need is_greedy anywhere? 
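+        # acc takes the argmax of the raw loglikelihoods; acc_norm first divides each
+        # loglikelihood by the character length of its choice as a simple length normalization.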
+ gold = doc["gold"] + + acc = 1.0 if np.argmax(results) == gold else 0.0 + completion_len = np.array([float(len(i)) for i in doc["choices"]]) + acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0 + + return { + "acc": acc, + "acc_norm": acc_norm, + } + + def higher_is_better(self) -> dict: + return { + "acc": True, + "acc_norm": True, + } + + def aggregation(self) -> dict: + return { + "acc": mean, + "acc_norm": mean, + } + + +class PerplexityTask(Task): + OUTPUT_TYPE = "loglikelihood_rolling" + + def has_training_docs(self) -> bool: + return False + + def fewshot_examples(self, k: int, rnd) -> List: + if k != 0: + raise ValueError( + "The number of fewshot examples must be 0 for perplexity tasks." + ) + return [] + + def fewshot_context(self, doc: dict, num_fewshot: int) -> Literal[""]: + if num_fewshot != 0: + raise ValueError( + "The number of fewshot examples must be 0 for perplexity tasks." + ) + + return "" + + def higher_is_better(self) -> dict: + return { + "word_perplexity": False, + "byte_perplexity": False, + "bits_per_byte": False, + } + + def doc_to_decontamination_query(self, doc): + return doc + + def doc_to_text(self, doc) -> str: + return "" + + def doc_to_target(self, doc): + return doc + + def construct_requests(self, doc: dict, ctx: Optional[str], **kwargs): + if bool(ctx): + raise ValueError + + return Instance( + request_type=self.OUTPUT_TYPE, + doc=doc, + arguments=(self.doc_to_target(doc),), + idx=0, + **kwargs, + ) + + def process_results(self, doc: dict, results: Tuple[float]) -> dict: + (loglikelihood,) = results + words = self.count_words(self.doc_to_target(doc)) + bytes_ = self.count_bytes(self.doc_to_target(doc)) + return { + "word_perplexity": (loglikelihood, words), + "byte_perplexity": (loglikelihood, bytes_), + "bits_per_byte": (loglikelihood, bytes_), + } + + def aggregation(self) -> dict: + return { + "word_perplexity": weighted_perplexity, + "byte_perplexity": weighted_perplexity, + "bits_per_byte": bits_per_byte, + } + + @classmethod + def count_bytes(cls, doc) -> int: + return len(doc.encode("utf-8")) + + @classmethod + def count_words(cls, doc) -> int: + """Downstream tasks with custom word boundaries should override this!""" + return len(re.split(r"\s+", doc)) diff --git a/lm-evaluation-harness/lm_eval/caching/__pycache__/cache.cpython-310.pyc b/lm-evaluation-harness/lm_eval/caching/__pycache__/cache.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a76acae5750f88e339646ed2e8927700d6b74691 Binary files /dev/null and b/lm-evaluation-harness/lm_eval/caching/__pycache__/cache.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/caching/cache.py b/lm-evaluation-harness/lm_eval/caching/cache.py new file mode 100644 index 0000000000000000000000000000000000000000..63691435215a05894d206f3f8218ab23c5d2e250 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/caching/cache.py @@ -0,0 +1,55 @@ +import hashlib +import os + +import dill + +from lm_eval.utils import eval_logger + + +MODULE_DIR = os.path.dirname(os.path.realpath(__file__)) + +OVERRIDE_PATH = os.getenv("LM_HARNESS_CACHE_PATH") + + +PATH = OVERRIDE_PATH if OVERRIDE_PATH else f"{MODULE_DIR}/.cache" + +# This should be sufficient for uniqueness +HASH_INPUT = "EleutherAI-lm-evaluation-harness" + +HASH_PREFIX = hashlib.sha256(HASH_INPUT.encode("utf-8")).hexdigest() + +FILE_SUFFIX = f".{HASH_PREFIX}.pickle" + + +def load_from_cache(file_name): + try: + path = f"{PATH}/{file_name}{FILE_SUFFIX}" + + with open(path, "rb") as file: + 
cached_task_dict = dill.loads(file.read()) + return cached_task_dict + + except Exception: + eval_logger.debug(f"{file_name} is not cached, generating...") + pass + + +def save_to_cache(file_name, obj): + if not os.path.exists(PATH): + os.mkdir(PATH) + + file_path = f"{PATH}/{file_name}{FILE_SUFFIX}" + + eval_logger.debug(f"Saving {file_path} to cache...") + with open(file_path, "wb") as file: + file.write(dill.dumps(obj)) + + +# NOTE the "key" param is to allow for flexibility +def delete_cache(key: str = ""): + files = os.listdir(PATH) + + for file in files: + if file.startswith(key) and file.endswith(FILE_SUFFIX): + file_path = f"{PATH}/{file}" + os.unlink(file_path) diff --git a/lm-evaluation-harness/lm_eval/filters/__init__.py b/lm-evaluation-harness/lm_eval/filters/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a9fd87375ede42735c1ea3c4d44c9736692c6c1b --- /dev/null +++ b/lm-evaluation-harness/lm_eval/filters/__init__.py @@ -0,0 +1,48 @@ +from functools import partial +from typing import List, Union + +from lm_eval.api.filter import FilterEnsemble + +from . import extraction, selection, transformation + + +FILTER_REGISTRY = { + "take_first": selection.TakeFirstFilter, + "regex": extraction.RegexFilter, + "majority_vote": selection.MajorityVoteFilter, + "take_first_k": selection.TakeKFilter, + "remove_whitespace": extraction.WhitespaceFilter, + "lowercase": transformation.LowercaseFilter, + "uppercase": transformation.UppercaseFilter, + "map": transformation.MapFilter, + "multi_choice_regex": extraction.MultiChoiceRegexFilter, + # TODO: implement this filter. either it should take in an arbitrary "scoring"/reward function + # that takes an input and returns a scalar and then should select the max reward, + # or should implement different filters for different ways of handling a reward model's inference. + # "arg_max": selection.ArgMaxFilter, +} + + +def get_filter(filter_name: str) -> Union[type, str]: + if filter_name in FILTER_REGISTRY: + return FILTER_REGISTRY[filter_name] + else: + return filter_name + + +def build_filter_ensemble( + filter_name: str, components: List[List[str]] +) -> FilterEnsemble: + """ + Create a filtering pipeline. 
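+
+    `components` is a list of `[filter_name, kwargs]` pairs; each name is looked up in
+    FILTER_REGISTRY and the resulting filters are applied in order.
+
+    Example (illustrative; "my-pipeline" is just a placeholder name):
+        build_filter_ensemble(
+            "my-pipeline",
+            [["regex", {"regex_pattern": r"#### (\-?[0-9\.\,]+)"}], ["take_first", None]],
+        )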
+ """ + filters = [] + for function, kwargs in components: + if kwargs is None: + kwargs = {} + # create a filter given its name in the registry + f = partial(get_filter(function), **kwargs) + # add the filter as a pipeline step + filters.append(f) + + return FilterEnsemble(name=filter_name, filters=filters) diff --git a/lm-evaluation-harness/lm_eval/filters/__pycache__/__init__.cpython-310.pyc b/lm-evaluation-harness/lm_eval/filters/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8231fc699e0761a4c6897d2473a37c5a2104edf8 Binary files /dev/null and b/lm-evaluation-harness/lm_eval/filters/__pycache__/__init__.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/filters/__pycache__/extraction.cpython-310.pyc b/lm-evaluation-harness/lm_eval/filters/__pycache__/extraction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eabde463f66c0e461ec69ae7f01721267407414c Binary files /dev/null and b/lm-evaluation-harness/lm_eval/filters/__pycache__/extraction.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/filters/__pycache__/selection.cpython-310.pyc b/lm-evaluation-harness/lm_eval/filters/__pycache__/selection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e5d29cd7b147cf2c3f891592a80836bd4c30d7e Binary files /dev/null and b/lm-evaluation-harness/lm_eval/filters/__pycache__/selection.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/filters/__pycache__/transformation.cpython-310.pyc b/lm-evaluation-harness/lm_eval/filters/__pycache__/transformation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4225a2b6585a190a9a67eea8427f99627b5f077c Binary files /dev/null and b/lm-evaluation-harness/lm_eval/filters/__pycache__/transformation.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/filters/decontamination.py b/lm-evaluation-harness/lm_eval/filters/decontamination.py new file mode 100644 index 0000000000000000000000000000000000000000..676c1e74496c9b135dcc448c2020b5c799c1fb10 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/filters/decontamination.py @@ -0,0 +1,24 @@ +from lm_eval.api.filter import Filter + + +class DecontaminationFilter(Filter): + + """ + A filter which evaluates + """ + + name = "track_decontamination" + + def __init__(self, path) -> None: + """ + + TODO: make sure only ever run one time on the train set (should this be cached as a class var? keyed by value for "path"). + should further cache result on a given (task_name, doc_id) + """ + self._decontam_results = None + + def apply(self, resps, docs) -> None: + """ + Return {"no_contamination", "only_contamination"} keys for the 2 different subsets + """ + pass diff --git a/lm-evaluation-harness/lm_eval/filters/extraction.py b/lm-evaluation-harness/lm_eval/filters/extraction.py new file mode 100644 index 0000000000000000000000000000000000000000..2a593747825dc4485fc00524a56a9cd7260dadd9 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/filters/extraction.py @@ -0,0 +1,183 @@ +import re +import sys +import unicodedata + +from lm_eval.api.filter import Filter + + +class RegexFilter(Filter): + """ """ + + def __init__( + self, + regex_pattern: str = r"#### (\-?[0-9\.\,]+)", + group_select=0, + fallback: str = "[invalid]", + ) -> None: + """ + pass a string `regex` to run `re.compile(r"regex")` on. + `fallback` defines the output returned if no matches for the regex are located. 
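+        `group_select` picks which of the `re.findall` matches to keep when the pattern
+        matches more than once (0 = first match, -1 = last).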
+ """ + self.regex_pattern = regex_pattern + self.regex = re.compile(regex_pattern) + self.group_select = group_select + self.fallback = fallback + + def apply(self, resps, docs): + # here, we assume we have a list, in which each element is + # a list of model responses for some particular input/target pair. + # so we process each of these (same input/target response sets) + # independently (and keep them a list.) + def filter_set(inst): + filtered = [] + for resp in inst: + match = self.regex.findall(resp) + if match: + match = match[self.group_select] + if isinstance(match, tuple): + match = [m for m in match if m][0] + match = match.strip() + else: + match = self.fallback + filtered.append(match) + return filtered + + # print(resps) + filtered_resps = list(map(lambda x: filter_set(x), resps)) + # print(filtered_resps) + + return filtered_resps + + +class WhitespaceFilter(Filter): + """ """ + + def __init__(self) -> None: + pass + + def apply(self, resps, docs): + def filter_set(inst): + filtered_resp = [] + for resp in inst: + if resp.startswith(" "): + resp = resp[1:] + + filtered_resp.append(resp) + + return filtered_resp + + filtered_resps = [filter_set(resp) for resp in resps] + + return filtered_resps + + +class MultiChoiceRegexFilter(RegexFilter): + """ + A filter used to extract a model's answer on multiple choice questions with + letter answers. assumes each document has a "choices" field + containing the list of answer choices and that the answer label symbols + are of the form (A), (B), (C), ... or A, B, C. + """ + + def __init__( + self, + regex_pattern: str = r"#### (\-?[0-9\.\,]+)", + group_select=0, + fallback: str = "[invalid]", + ignore_case=False, + ignore_punctuation=False, + regexes_to_ignore=None, + ) -> None: + """ + regex_pattern: The basic regex pattern to use. If fails to match, we will use the customized match procedure + - step 1 : We parse the choices between ([A-Z])s then try to find these choices in the response. + - step 2 : We parse the choice with regex :[\s]*([A-?]), where ? varies by number of choices. + group_select: Selects the (group_select)th match from the findall result. + ignore_case: Ignores the case during step 1 matching + ignore_punctuation: Remove the punctuation during step 1 matching + regexes_to_ignore: Remove these regexes during step 1 matching + """ + super().__init__(regex_pattern, group_select, fallback) + self.ignore_case = ignore_case + self.ignore_punctuation = ignore_punctuation + self.regexes_to_ignore = regexes_to_ignore + + def apply(self, resps, docs): + # here, we assume we have a list, in which each element is + # a list of model responses for some particular input/target pair. + # so we process each of these (same input/target response sets) + # independently (and keep them a list.) 
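+        # overall strategy: try the primary regex first; if it matches nothing, fall back to
+        # matching the literal choice strings (mapped back to their "(A)"-style labels), and
+        # finally to a bare ":<letter>" pattern built from the available choices.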
+ + def find_match(regex, resp, convert_dict={}): + match = regex.findall(resp) + if match: + match = match[self.group_select] + if isinstance(match, tuple): + match = [m for m in match if m][0] + match = match.strip() + if match and match in convert_dict: + match = convert_dict[match] + return match + + punct_tbl = dict.fromkeys( + i + for i in range(sys.maxunicode) + if unicodedata.category(chr(i)).startswith("P") + ) + + def filter_ignores(st): + if self.regexes_to_ignore is not None: + for s in self.regexes_to_ignore: + st = re.sub(s, "", st) + + if self.ignore_case: + st = st.lower() + + if self.ignore_punctuation: + # https://stackoverflow.com/a/266162 + st = st.translate(punct_tbl) + return st + + filtered_resps = [] + + for r, doc in zip(resps, docs): + fallback_regexes = [] + choice_to_alpha = {} + next_alpha = "A" + + without_paren_fallback_regexes = [] + without_paren_to_target = {} + + choices = doc["choices"] + for c in choices: + m = filter_ignores(c.strip()) + fallback_regexes.append(f"{re.escape(m)}") + choice_to_alpha[m] = f"({next_alpha})" + + without_paren_fallback_regexes.append(next_alpha) + without_paren_to_target[next_alpha] = f"({next_alpha})" + + next_alpha = chr(ord(next_alpha) + 1) + fallback_regex = re.compile("|".join(fallback_regexes)) + without_paren_fallback_regex = "|".join(without_paren_fallback_regexes) + without_paren_fallback_regex = re.compile( + f":[\s]*({without_paren_fallback_regex})" + ) + + filtered = [] + for resp in r: + match = find_match(self.regex, resp) + if not match: + match = find_match( + fallback_regex, filter_ignores(resp), choice_to_alpha + ) + if not match: + match = find_match( + without_paren_fallback_regex, resp, without_paren_to_target + ) + if not match: + match = self.fallback + filtered.append(match) + filtered_resps.append(filtered) + + return filtered_resps diff --git a/lm-evaluation-harness/lm_eval/filters/selection.py b/lm-evaluation-harness/lm_eval/filters/selection.py new file mode 100644 index 0000000000000000000000000000000000000000..01001fa37721db11a7276f4ac36582088fa6791f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/filters/selection.py @@ -0,0 +1,52 @@ +from collections import Counter + +from lm_eval.api.filter import Filter + + +class TakeFirstFilter(Filter): + def __init__(self) -> None: + """ + Can define custom behavior here, if an individual instantiation of a Filter class should have state. + """ + + def apply(self, resps, docs): + """ + Assuming each entry of `resps` is a list of model responses, we discard all but the first response. + """ + return map(lambda r: r[0], resps) + + +class TakeKFilter(Filter): + def __init__(self, **kwargs) -> None: + self.k = kwargs.pop("k") + + super().__init__(**kwargs) + + def apply(self, resps, docs): + # need resp to be subscriptable to check below + resps = list(resps) + # check we have at least k responses per doc, else we can't take the first k + assert ( + len(resps[0]) >= self.k + ), f"Need at least {self.k} responses per doc to take first {self.k}, but got {len(resps[0])} only! Please increase TaskConfig.repeats ." + return map(lambda r: r[: self.k], resps) + + +class MajorityVoteFilter(Filter): + def __init__(self) -> None: + """ + Can define custom behavior here, if an individual instantiation of a Filter class should have state. + """ + + def apply(self, resps, docs): + """ + Each entry of `resps` is a list of model responses. + We select the response that occurs most frequently in each entry of `resps`. 
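+        Ties are broken by `collections.Counter.most_common`, which orders equal counts
+        by first occurrence.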
+ """ + + def select_majority(resp): + counts = Counter(resp) + vote = counts.most_common(1)[0][0] + return vote + + return map(lambda r: [select_majority(r)], resps) diff --git a/lm-evaluation-harness/lm_eval/filters/transformation.py b/lm-evaluation-harness/lm_eval/filters/transformation.py new file mode 100644 index 0000000000000000000000000000000000000000..41d03df7e1c688513eb0a2163aee489c1fd0f11d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/filters/transformation.py @@ -0,0 +1,52 @@ +from lm_eval.api.filter import Filter + + +class LowercaseFilter(Filter): + def __init__(self) -> None: + pass + + def apply(self, resps, docs): + def filter_set(inst): + return [resp.lower() for resp in inst] + + return [filter_set(resp) for resp in resps] + + +class UppercaseFilter(Filter): + def __init__(self) -> None: + pass + + def apply(self, resps, docs): + def filter_set(inst): + return [resp.upper() for resp in inst] + + return [filter_set(resp) for resp in resps] + + +class MapFilter(Filter): + def __init__(self, mapping_dict: dict = None, default_value=None) -> None: + """ + Initializes the MapFilter with a given mapping dictionary and default value. + + Args: + - mapping_dict (dict): A dictionary containing the key-value mappings. + Default is an empty dictionary. + - default_value (Any): The value to be returned when a key is not found in the mapping_dict. + Default is None. + + Example: + mapper = MapFilter({'A': 1, 'B': 2}, default_value=0) + """ + if mapping_dict is None: + mapping_dict = {} + assert isinstance( + mapping_dict, dict + ), "Provided mapping_dict is not a dictionary" + self.mapping_dict = mapping_dict + self.default_value = default_value + + def apply(self, resps, docs): + def filter_set(inst): + return [self.mapping_dict.get(resp, self.default_value) for resp in inst] + + return [filter_set(resp) for resp in resps] diff --git a/lm-evaluation-harness/lm_eval/models/__init__.py b/lm-evaluation-harness/lm_eval/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..698c912f277fb1de6cca0ab4068e399bcbd29607 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/models/__init__.py @@ -0,0 +1,27 @@ +from . 
import ( + anthropic_llms, + dummy, + gguf, + huggingface, + mamba_lm, + nemo_lm, + neuralmagic, + neuron_optimum, + openai_completions, + optimum_lm, + textsynth, + vllm_causallms, +) + + +# TODO: implement __all__ + + +try: + # enable hf hub transfer if available + import hf_transfer # type: ignore # noqa + import huggingface_hub.constants # type: ignore + + huggingface_hub.constants.HF_HUB_ENABLE_HF_TRANSFER = True +except ImportError: + pass diff --git a/lm-evaluation-harness/lm_eval/models/__pycache__/__init__.cpython-310.pyc b/lm-evaluation-harness/lm_eval/models/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a70afb6ceea5c1c6f284aced5785525117bda243 Binary files /dev/null and b/lm-evaluation-harness/lm_eval/models/__pycache__/__init__.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/models/__pycache__/anthropic_llms.cpython-310.pyc b/lm-evaluation-harness/lm_eval/models/__pycache__/anthropic_llms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65ac2b9dff86f7645a5e2f9d32122701d508564a Binary files /dev/null and b/lm-evaluation-harness/lm_eval/models/__pycache__/anthropic_llms.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/models/__pycache__/dummy.cpython-310.pyc b/lm-evaluation-harness/lm_eval/models/__pycache__/dummy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4cf92226c394717812981cef5ab56ff962d949b Binary files /dev/null and b/lm-evaluation-harness/lm_eval/models/__pycache__/dummy.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/models/__pycache__/gguf.cpython-310.pyc b/lm-evaluation-harness/lm_eval/models/__pycache__/gguf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2de17f68f6723c140cca3328a1d146e361a56ae2 Binary files /dev/null and b/lm-evaluation-harness/lm_eval/models/__pycache__/gguf.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/models/__pycache__/huggingface.cpython-310.pyc b/lm-evaluation-harness/lm_eval/models/__pycache__/huggingface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdcc7f897fc78e3ee583ff5a3080e47dd8f775ce Binary files /dev/null and b/lm-evaluation-harness/lm_eval/models/__pycache__/huggingface.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/models/__pycache__/mamba_lm.cpython-310.pyc b/lm-evaluation-harness/lm_eval/models/__pycache__/mamba_lm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c8e789f29d7a8af4e68d3a6521b216e399e7ca5 Binary files /dev/null and b/lm-evaluation-harness/lm_eval/models/__pycache__/mamba_lm.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/models/__pycache__/nemo_lm.cpython-310.pyc b/lm-evaluation-harness/lm_eval/models/__pycache__/nemo_lm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e418ff2d8fd1bc557f97811211853f6f7a41edab Binary files /dev/null and b/lm-evaluation-harness/lm_eval/models/__pycache__/nemo_lm.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/models/__pycache__/neuralmagic.cpython-310.pyc b/lm-evaluation-harness/lm_eval/models/__pycache__/neuralmagic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..369c3c204c11f379d0c312950ce934dfc403ea29 Binary files /dev/null and b/lm-evaluation-harness/lm_eval/models/__pycache__/neuralmagic.cpython-310.pyc differ diff --git 
a/lm-evaluation-harness/lm_eval/models/__pycache__/neuron_optimum.cpython-310.pyc b/lm-evaluation-harness/lm_eval/models/__pycache__/neuron_optimum.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4bac2a65a57249c41dcef882ddd5881220f12af3 Binary files /dev/null and b/lm-evaluation-harness/lm_eval/models/__pycache__/neuron_optimum.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/models/__pycache__/openai_completions.cpython-310.pyc b/lm-evaluation-harness/lm_eval/models/__pycache__/openai_completions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a43009f7fd1dc71b13af85a0f8924c38167379a4 Binary files /dev/null and b/lm-evaluation-harness/lm_eval/models/__pycache__/openai_completions.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/models/__pycache__/optimum_lm.cpython-310.pyc b/lm-evaluation-harness/lm_eval/models/__pycache__/optimum_lm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f4873b5c9b8b9516887c35a9249add44602a076 Binary files /dev/null and b/lm-evaluation-harness/lm_eval/models/__pycache__/optimum_lm.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/models/__pycache__/textsynth.cpython-310.pyc b/lm-evaluation-harness/lm_eval/models/__pycache__/textsynth.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd2136109b226038f1d64e7b2d70e3e5ecf6ec7f Binary files /dev/null and b/lm-evaluation-harness/lm_eval/models/__pycache__/textsynth.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/models/__pycache__/utils.cpython-310.pyc b/lm-evaluation-harness/lm_eval/models/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13c32eade586cd21bf43b3cd167ae762d610ce1b Binary files /dev/null and b/lm-evaluation-harness/lm_eval/models/__pycache__/utils.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/models/__pycache__/vllm_causallms.cpython-310.pyc b/lm-evaluation-harness/lm_eval/models/__pycache__/vllm_causallms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7aad027b868bc2af272087c85e457638c9b767d Binary files /dev/null and b/lm-evaluation-harness/lm_eval/models/__pycache__/vllm_causallms.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/models/anthropic_llms.py b/lm-evaluation-harness/lm_eval/models/anthropic_llms.py new file mode 100644 index 0000000000000000000000000000000000000000..8645aadb02297dc1a269be91cc57c4c720a3f454 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/models/anthropic_llms.py @@ -0,0 +1,360 @@ +from typing import Any, List, Tuple + +from tqdm import tqdm + +from lm_eval import utils +from lm_eval.api.model import LM +from lm_eval.api.registry import register_model +from lm_eval.models.utils import retry_on_specific_exceptions + + +eval_logger = utils.eval_logger + + +def anthropic_completion( + client, #: anthropic.Anthropic, + model: str, + prompt: str, + max_tokens_to_sample: int, + temperature: float, + stop: List[str], + **kwargs: Any, +) -> str: + """Wrapper function around the Anthropic completion API client with exponential back-off + in case of RateLimitError. + + params: + client: anthropic.Anthropic + Anthropic API client + model: str + Anthropic model e.g. 
'claude-instant-v1', 'claude-2' + prompt: str + Prompt to feed to the model + max_tokens_to_sample: int + Maximum number of tokens to sample from the model + temperature: float + Sampling temperature + stop: List[str] + List of stop sequences + kwargs: Any + Additional model_args to pass to the API client + """ + + try: + import anthropic + except ModuleNotFoundError: + raise Exception( + "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \ +please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`", + ) + + def _exception_callback(e: Exception, sleep_time: float) -> None: + eval_logger.warning( + f"RateLimitError occurred: {e.__cause__}\n Retrying in {sleep_time} seconds" + ) + + @retry_on_specific_exceptions( + on_exceptions=[anthropic.RateLimitError], + max_retries=None, # retry forever, consider changing + on_exception_callback=_exception_callback, + ) + def completion(): + response = client.completions.create( + prompt=f"{anthropic.HUMAN_PROMPT} {prompt}{anthropic.AI_PROMPT}", + model=model, + # NOTE: Claude really likes to do CoT, and overly aggressive stop sequences + # (e.g. gsm8k's ":") may truncate a lot of the input. + stop_sequences=[anthropic.HUMAN_PROMPT] + stop, + max_tokens_to_sample=max_tokens_to_sample, + temperature=temperature, + **kwargs, + ) + return response.completion + + return completion() + + +def anthropic_chat( + client, #: anthropic.Anthropic, + model: str, + prompt: str, + max_tokens: int, + temperature: float, + stop: List[str], + **kwargs: Any, +) -> str: + """Wrapper function around the Anthropic completion API client with exponential back-off + in case of RateLimitError. + + params: + client: anthropic.Anthropic + Anthropic API client + model: str + Anthropic model e.g. 'claude-3-opus-20240229', 'claude-3-sonnet-20240229' + prompt: str + Prompt to feed to the model + max_tokens: int + Maximum number of tokens to sample from the model + temperature: float + Sampling temperature + stop: List[str] + List of stop sequences + kwargs: Any + Additional model_args to pass to the API client + """ + + try: + import anthropic + except ModuleNotFoundError: + raise Exception( + "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \ +please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`", + ) + + def _exception_callback(e: Exception, sleep_time: float) -> None: + eval_logger.warning( + f"RateLimitError occurred: {e.__cause__}\n Retrying in {sleep_time} seconds" + ) + + @retry_on_specific_exceptions( + on_exceptions=[ + anthropic.RateLimitError, + anthropic.APIConnectionError, + anthropic.APIStatusError, + ], + max_retries=None, # retry forever, consider changing + on_exception_callback=_exception_callback, + ) + def messages(): + response = client.messages.create( + model=model, + max_tokens=max_tokens, + temperature=temperature, + messages=[{"role": "user", "content": f"{prompt}"}], + **kwargs, + ) + return response.content[0].text + + return messages() + + +@register_model("anthropic") +class AnthropicLM(LM): + REQ_CHUNK_SIZE = 20 # TODO: not used + + def __init__( + self, + batch_size: int = 1, + model: str = "claude-2.0", + max_tokens_to_sample: int = 256, + temperature: float = 0, # defaults to 1 + **kwargs, # top_p, top_k, etc. + ) -> None: + """Anthropic API wrapper. + + :param model: str + Anthropic model e.g. 
'claude-instant-v1', 'claude-2' + :param max_tokens_to_sample: int + Maximum number of tokens to sample from the model + :param temperature: float + Sampling temperature + :param kwargs: Any + Additional model_args to pass to the API client + """ + super().__init__() + + try: + import anthropic + except ModuleNotFoundError: + raise Exception( + "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \ +please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`", + ) + + self.model = model + # defaults to os.environ.get("ANTHROPIC_API_KEY") + self.client = anthropic.Anthropic() + self.temperature = temperature + self.max_tokens_to_sample = max_tokens_to_sample + self.tokenizer = self.client.get_tokenizer() + self.kwargs = kwargs + + @property + def eot_token_id(self): + # Not sure but anthropic.HUMAN_PROMPT ? + raise NotImplementedError("No idea about anthropic tokenization.") + + @property + def max_length(self) -> int: + return 2048 + + @property + def max_gen_toks(self) -> int: + return self.max_tokens_to_sample + + @property + def batch_size(self): + # Isn't used because we override _loglikelihood_tokens + raise NotImplementedError("No support for logits.") + + @property + def device(self): + # Isn't used because we override _loglikelihood_tokens + raise NotImplementedError("No support for logits.") + + def tok_encode(self, string: str) -> List[int]: + return self.tokenizer.encode(string).ids + + def tok_decode(self, tokens: List[int]) -> str: + return self.tokenizer.decode(tokens) + + def _loglikelihood_tokens(self, requests, disable_tqdm: bool = False): + raise NotImplementedError("No support for logits.") + + def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]: + try: + import anthropic + except ModuleNotFoundError: + raise Exception( + "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. 
\ +please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`", + ) + + if not requests: + return [] + + _requests: List[Tuple[str, dict]] = [req.args for req in requests] + + res = [] + for request in tqdm(_requests, disable=disable_tqdm): + try: + inp = request[0] + request_args = request[1] + # generation_kwargs + until = request_args.get("until") + max_gen_toks = request_args.get("max_gen_toks", self.max_length) + temperature = request_args.get("temperature", self.temperature) + response = anthropic_completion( + client=self.client, + model=self.model, + prompt=inp, + max_tokens_to_sample=max_gen_toks, + temperature=temperature, # TODO: implement non-greedy sampling for Anthropic + stop=until, # type: ignore + **self.kwargs, + ) + res.append(response) + + self.cache_hook.add_partial("generate_until", request, response) + except anthropic.APIConnectionError as e: # type: ignore # noqa: F821 + eval_logger.critical(f"Server unreachable: {e.__cause__}") + break + except anthropic.APIStatusError as e: # type: ignore # noqa: F821 + eval_logger.critical(f"API error {e.status_code}: {e.message}") + break + + return res + + def _model_call(self, inps): + # Isn't used because we override _loglikelihood_tokens + raise NotImplementedError() + + def _model_generate(self, context, max_length, eos_token_id): + # Isn't used because we override generate_until + raise NotImplementedError() + + def loglikelihood(self, requests, disable_tqdm: bool = False): + raise NotImplementedError("No support for logits.") + + def loglikelihood_rolling(self, requests, disable_tqdm: bool = False): + raise NotImplementedError("No support for logits.") + + +@register_model("anthropic-chat", "anthropic-chat-completions") +class AnthropicChatLM(AnthropicLM): + REQ_CHUNK_SIZE = 20 # TODO: not used + + def __init__( + self, + model: str, + batch_size: int = 1, + max_tokens: int = 256, + temperature: float = 0, # defaults to 1 + **kwargs, # top_p, top_k, etc. + ) -> None: + """Anthropic API wrapper. + + :param model: str + Anthropic model e.g. 'claude-3-opus-20240229', 'claude-3-sonnet-20240229' + :param max_tokens: int + Maximum number of tokens to sample from the model + :param temperature: float + Sampling temperature + :param kwargs: Any + Additional model_args to pass to the API client + """ + super().__init__() + + try: + import anthropic + except ModuleNotFoundError: + raise Exception( + "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \ +please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`", + ) + + self.model = model + # defaults to os.environ.get("ANTHROPIC_API_KEY") + self.client = anthropic.Anthropic() + self.temperature = temperature + self.max_token = max_tokens + self.tokenizer = self.client.get_tokenizer() + self.kwargs = kwargs + + @property + def max_gen_toks(self) -> int: + return self.max_tokens + + def generate_until(self, requests) -> List[str]: + try: + import anthropic + except ModuleNotFoundError: + raise Exception( + "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. 
\ +please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`", + ) + + if not requests: + return [] + + _requests: List[Tuple[str, dict]] = [req.args for req in requests] + + res = [] + for request in tqdm(_requests): + try: + inp = request[0] + request_args = request[1] + # generation_kwargs + until = request_args.get("until") + max_tokens = request_args.get("max_gen_toks", self.max_length) + temperature = request_args.get("temperature", self.temperature) + response = anthropic_chat( + client=self.client, + model=self.model, + prompt=inp, + max_tokens=max_tokens, + temperature=temperature, # TODO: implement non-greedy sampling for Anthropic + stop=until, # type: ignore + **self.kwargs, + ) + res.append(response) + + self.cache_hook.add_partial("generate_until", request, response) + except anthropic.APIConnectionError as e: # type: ignore # noqa: F821 + eval_logger.critical(f"Server unreachable: {e.__cause__}") + break + except anthropic.APIStatusError as e: # type: ignore # noqa: F821 + eval_logger.critical(f"API error {e.status_code}: {e.message}") + break + + return res diff --git a/lm-evaluation-harness/lm_eval/models/dummy.py b/lm-evaluation-harness/lm_eval/models/dummy.py new file mode 100644 index 0000000000000000000000000000000000000000..83737739672724f5fd6581ad59955e555b770ec4 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/models/dummy.py @@ -0,0 +1,41 @@ +import random + +from tqdm import tqdm + +from lm_eval.api.model import LM +from lm_eval.api.registry import register_model + + +@register_model("dummy") +class DummyLM(LM): + def __init__(self) -> None: + super().__init__() + + @classmethod + def create_from_arg_string(cls, arg_string, additional_config=None): + return cls() + + def loglikelihood(self, requests, disable_tqdm: bool = False): + res = [] + + for _ in tqdm(requests, disable=disable_tqdm): + res.append((-random.random(), False)) + + return res + + def generate_until(self, requests, disable_tqdm: bool = False): + res = [] + + for ctx, _ in tqdm(requests, disable=disable_tqdm): + res.append("lol") + assert ctx.strip() != "" + + return res + + def loglikelihood_rolling(self, requests, disable_tqdm: bool = False): + res = [] + + for _ in tqdm(requests, disable=disable_tqdm): + res.append(-random.random()) + + return res diff --git a/lm-evaluation-harness/lm_eval/models/gguf.py b/lm-evaluation-harness/lm_eval/models/gguf.py new file mode 100644 index 0000000000000000000000000000000000000000..ee1362c6b0bedd8f831a1a4f93821b8c661f25e3 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/models/gguf.py @@ -0,0 +1,130 @@ +import logging +import time + +import requests +from requests.exceptions import RequestException +from tqdm import tqdm + +from lm_eval.api.model import LM +from lm_eval.api.registry import register_model + + +logger = logging.getLogger(__name__) + + +def get_result(logprobs, context_length): + is_greedy = True + offsets = logprobs["text_offset"] + tokens = logprobs["tokens"] + tokens_logprobs = logprobs["token_logprobs"] + + idx = 0 + while offsets[idx] < context_length: + idx += 1 + continuation_logprobs = sum(tokens_logprobs[idx:-1]) + for i in range(idx, len(tokens)): + token = tokens[i] + top_tokens = logprobs["top_logprobs"][i] + top_token = max(top_tokens.keys(), key=lambda x: top_tokens[x]) + if top_token != token: + is_greedy = False + break + + return continuation_logprobs, is_greedy + + +@register_model("gguf", "ggml") +class GGUFLM(LM): + def __init__(self, base_url=None, max_length=2048, **kwargs): + 
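+        # Illustrative note: `base_url` is expected to point at a running server exposing an
+        # OpenAI-style /v1/completions endpoint for the GGUF/GGML model (gguf_completion below
+        # POSTs to f"{base_url}/v1/completions"); e.g. a llama.cpp-compatible server at
+        # "http://localhost:8000" -- that address is only a hypothetical example, not a default.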
super().__init__() + self.base_url = base_url + assert self.base_url, "must pass `base_url` to use GGUF LM!" + self.logprobs = 10 + self.temperature = 0.0 + self.max_length = max_length + + def gguf_completion( + self, context, continuation=None, stop=None, retries=3, delay=5, **kwargs + ): + for _ in range(retries): + try: + prompt = context + request = { + "prompt": prompt, + "logprobs": self.logprobs, + "temperature": self.temperature, + } + if continuation: + prompt += continuation + request.update({"prompt": prompt, "max_tokens": 1, "echo": True}) + if stop is not None: + request["stop"] = stop + response = requests.post( + f"{self.base_url}/v1/completions", json=request + ) + response.raise_for_status() + return response.json() + except RequestException as e: + logger.error(f"RequestException: {e}") + time.sleep(delay) # wait before retrying + else: + raise Exception(f"Failed to get a valid response after {retries} retries.") + + def loglikelihood(self, requests, disable_tqdm: bool = False): + if not requests: + return [] + res = [] + for context, continuation in tqdm( + [req.args for req in requests], disable=disable_tqdm + ): + response = self.gguf_completion(context=context, continuation=continuation) + if response and "choices" in response and response["choices"]: + choice = response["choices"][0] + logprobs = choice.get("logprobs") + if ( + logprobs + and "token_logprobs" in logprobs + and logprobs["token_logprobs"] + ): + logprob, is_greedy = get_result(logprobs, len(context)) + res.append((logprob, is_greedy)) + else: + logger.warning( + "Invalid logprobs data. Expected 'logprobs' to contain 'token_logprobs' list." + ) + else: + logger.error( + f"Invalid response for loglikelihood. Response: {response}" + ) + assert False + return res + + def generate_until(self, requests, disable_tqdm: bool = False): + if not requests: + return [] + + res = [] + for request in tqdm([req.args for req in requests], disable=disable_tqdm): + inp = request[0] + request_args = request[1] + until = request_args.get("until", [""]) + response = self.gguf_completion(context=inp, stop=until) + if response and "choices" in response and response["choices"]: + choice = response["choices"][0] + if "text" in choice: + generated_text = choice["text"].strip() + res.append(generated_text) + else: + logger.error( + f"Invalid response for greedy_until. Response: {response}" + ) + res.append(None) # Add default value in case of error + else: + logger.error(f"Invalid response for greedy_until. 
Response: {response}") + res.append(None) # Add default value in case of error + return res + + def loglikelihood_rolling(self, requests, disable_tqdm: bool = False): + raise NotImplementedError( + "loglikelihood_rolling not yet supported for GGUF models" + ) diff --git a/lm-evaluation-harness/lm_eval/models/huggingface.py b/lm-evaluation-harness/lm_eval/models/huggingface.py new file mode 100644 index 0000000000000000000000000000000000000000..ac3296d5aeba3ce6e0ac29c8373681b3f6a1a233 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/models/huggingface.py @@ -0,0 +1,1274 @@ +import copy +import os +from datetime import timedelta +from pathlib import Path +from typing import List, Literal, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import transformers +from accelerate import ( + Accelerator, + DistributedType, + InitProcessGroupKwargs, + find_executable_batch_size, +) +from packaging import version +from peft import PeftModel +from peft import __version__ as PEFT_VERSION +from tqdm import tqdm +from transformers.models.auto.modeling_auto import ( + MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, +) + +from lm_eval import utils +from lm_eval.api.instance import Instance +from lm_eval.api.model import TemplateLM +from lm_eval.api.registry import register_model +from lm_eval.models.utils import ( + Collator, + clear_torch_cache, + get_dtype, + pad_and_concat, + stop_sequences_criteria, +) + + +eval_logger = utils.eval_logger + + +def _get_accelerate_args( + device_map_option: Optional[str] = "auto", + max_memory_per_gpu: Optional[Union[int, str]] = None, + max_cpu_memory: Optional[Union[int, str]] = None, + offload_folder: Optional[str] = "./offload", +) -> dict: + """Returns the kwargs needed to apply `accelerate` in `AutoModel.from_pretrained`.""" + max_memory = {} + if max_memory_per_gpu is not None: + max_memory_per_gpu_map = { + device_idx: max_memory_per_gpu + for device_idx in range(torch.cuda.device_count()) + } + max_memory.update(max_memory_per_gpu_map) + if max_cpu_memory is not None: + max_memory["cpu"] = max_cpu_memory + + args = {} + if max_memory: + args["max_memory"] = max_memory + args["device_map"] = device_map_option + args["offload_folder"] = offload_folder + return args + + +@register_model("hf-auto", "hf", "huggingface") +class HFLM(TemplateLM): + """ + An abstracted Huggingface model class. Enables usage with both models of + `transformers.AutoModelForCausalLM` and `transformers.AutoModelForSeq2SeqLM` classes. + + Supports data-parallel multi-GPU with HF Accelerate. 
+ """ + + AUTO_MODEL_CLASS = None + _DEFAULT_MAX_LENGTH = 2048 + + def __init__( + self, + pretrained: Optional[Union[str, transformers.PreTrainedModel]] = "gpt2", + backend: Optional[Literal["default", "causal", "seq2seq"]] = "default", + # override whether the model should be treated as decoder-only (causal) or encoder-decoder (seq2seq) + revision: Optional[str] = "main", + subfolder: Optional[str] = None, + tokenizer: Optional[ + Union[ + str, + transformers.PreTrainedTokenizer, + transformers.PreTrainedTokenizerFast, + ] + ] = None, + truncation: Optional[bool] = False, + logits_cache: bool = True, + max_length: Optional[int] = None, + device: Optional[str] = "cuda", + dtype: Optional[Union[str, torch.dtype]] = "auto", + batch_size: Optional[Union[int, str]] = 1, + max_batch_size: Optional[int] = 64, + trust_remote_code: Optional[bool] = False, + use_fast_tokenizer: Optional[bool] = True, + add_bos_token: Optional[bool] = False, + prefix_token_id: Optional[int] = None, + # arguments used for splitting a model across GPUs naively. + # only used if `parallelize=True`. + parallelize: Optional[bool] = False, + device_map_option: Optional[str] = "auto", + max_memory_per_gpu: Optional[Union[int, str]] = None, + max_cpu_memory: Optional[Union[int, str]] = None, + offload_folder: Optional[Union[str, os.PathLike]] = "./offload", + # PEFT, delta weights and quantization options + peft: Optional[str] = None, + delta: Optional[str] = None, + autogptq: Optional[Union[bool, str]] = False, + **kwargs, + ) -> None: + super().__init__() + + # optionally: take in an already-initialized transformers.PreTrainedModel + if not isinstance(pretrained, str): + eval_logger.warning( + "`pretrained` model kwarg is not of type `str`. Many other model arguments may be ignored. Please do not launch via accelerate or use `parallelize=True` if passing an existing model this way." + ) + assert not parallelize, "`parallelize=True` is not compatible with passing pre-initialized model to `pretrained`" + self._model = pretrained + self._device = self._model.device + self._config = self._model.config + gpus = 0 + + if tokenizer: + assert isinstance( + tokenizer, transformers.PreTrainedTokenizer + ) or isinstance(tokenizer, transformers.PreTrainedTokenizerFast) + self.tokenizer = tokenizer + else: + # Get tokenizer + model_name = self._model.name_or_path + self.tokenizer = transformers.AutoTokenizer.from_pretrained( + model_name, + revision=revision, + trust_remote_code=trust_remote_code, + use_fast=use_fast_tokenizer, + ) + + else: + assert isinstance(device, str) + assert isinstance(pretrained, str) + assert isinstance(batch_size, (int, str)) + + gpus = torch.cuda.device_count() + accelerator_kwargs = InitProcessGroupKwargs(timeout=timedelta(weeks=52)) + accelerator = Accelerator(kwargs_handlers=[accelerator_kwargs]) + if accelerator.num_processes > 1: + self.accelerator = accelerator + + if not (parallelize or accelerator.num_processes > 1): + # use user-passed device + device_list = set( + ["cuda", "cpu"] + + [f"cuda:{i}" for i in range(torch.cuda.device_count())] + + ["mps", "mps:0"] + ) + if device and device in device_list: + self._device = torch.device(device) + eval_logger.info(f"Using device '{device}'") + if device in ("mps", "mps:0") and version.parse( + torch.__version__ + ) < version.parse("2.1"): + raise RuntimeError( + f"mps requires torch >= 2.1. You have {torch.__version__}" + ) + else: + eval_logger.info("Device not specified") + eval_logger.info(f"Cuda Available? 
{torch.cuda.is_available()}") + self._device = ( + torch.device("cuda") + if torch.cuda.is_available() + else torch.device("cpu") + ) + else: + if device != "cuda": + eval_logger.info( + f"Using `accelerate launch` or `parallelize=True`, device '{device}' will be overridden when placing model." + ) + # TODO: include in warning that `load_in_8bit` etc. affect this too + self._device = torch.device(device) + + # TODO: update this to be less of a hack once subfolder is fixed in HF + revision = revision + ("/" + subfolder if subfolder is not None else "") + + self._get_config( + pretrained, + revision=revision, + trust_remote_code=trust_remote_code, + ) + + # determine which of 'causal' and 'seq2seq' backends to use + self._get_backend( + config=self.config, backend=backend, trust_remote_code=trust_remote_code + ) + + # if we passed `pretrained` as a string, initialize our model now + if isinstance(pretrained, str): + self._create_model( + pretrained=pretrained, + revision=revision, + dtype=dtype, + trust_remote_code=trust_remote_code, + parallelize=parallelize, + device_map_option=device_map_option, + max_memory_per_gpu=max_memory_per_gpu, + max_cpu_memory=max_cpu_memory, + offload_folder=offload_folder, + peft=peft, + delta=delta, + autogptq=autogptq, + **kwargs, + ) + + # access self._model through self.model property outside this method + if isinstance(self.model, torch.nn.Module): + self.model.eval() + self.model.tie_weights() + + if isinstance(pretrained, str) and (gpus >= 1 or str(self.device) == "mps"): + # TODO: can remove this whole snippet except in the mps case, perhaps? + if not (parallelize or autogptq or hasattr(self, "accelerator")): + # place model onto device requested manually, + # if not using HF Accelerate or device_map + # or any other option that preloads model onto device + try: + self.model.to(self.device) + except ValueError: + eval_logger.debug( + "Failed to place model onto specified device. This may be because the model is quantized via `bitsandbytes` or `device_map` is provided. If the desired GPU is being used, this message is safe to ignore." 
+ ) + + self._create_tokenizer( + pretrained, + tokenizer, + revision=revision, + trust_remote_code=trust_remote_code, + use_fast_tokenizer=use_fast_tokenizer, + ) + + self.truncation = truncation + self.logits_cache = logits_cache + self.vocab_size = self.tokenizer.vocab_size + # select (or create) a pad token to use + if self.tokenizer.pad_token: + pass + elif self.tokenizer.unk_token: + self.tokenizer.pad_token_id = self.tokenizer.unk_token_id + elif self.tokenizer.eos_token: + self.tokenizer.pad_token_id = self.tokenizer.eos_token_id + else: + if getattr(self.config, "model_type", None) == "qwen": + # Qwen's trust_remote_code tokenizer does not allow for adding special tokens + self.tokenizer.pad_token = "<|endoftext|>" + elif ( + self.tokenizer.__class__.__name__ == "RWKVWorldTokenizer" + or self.tokenizer.__class__.__name__ == "Rwkv5Tokenizer" + ): + # The RWKV world tokenizer, does not allow for adding special tokens / setting the pad token (which is set as 0) + # The additional tokenizer name check is needed, as there exists rwkv4 models with neox tokenizer + # --- + # Note that the world tokenizer class name, might change in the future for the final huggingface merge + # https://github.com/huggingface/transformers/pull/26963 + assert self.tokenizer.pad_token_id == 0 + else: + self.tokenizer.add_special_tokens({"pad_token": "<|pad|>"}) + + # TODO: override this for Gemma + self.add_bos_token = add_bos_token + if getattr(self.config, "model_type", None) == "gemma": + self.add_bos_token = True + eval_logger.info( + f"Model type is '{self.config.model_type}', a BOS token will be used as Gemma underperforms without it." + ) + + self._max_length = max_length + + self.batch_schedule = 1 + self.batch_sizes = {} + self.max_batch_size = max_batch_size + + if str(batch_size).startswith("auto"): + batch_size = batch_size.split(":") + self.batch_size_per_gpu = batch_size[0] + self.batch_schedule = float(batch_size[1]) if len(batch_size) > 1 else 1 + else: + self.batch_size_per_gpu = int(batch_size) + + if isinstance(pretrained, str): + # multigpu data-parallel support when launched with accelerate + if gpus > 1: + if parallelize: + if accelerator.num_processes > 1: + raise RuntimeError( + "Attempted to use both a HF Accelerate `device_map` and to launch via `accelerate launch`. If this is the case, please either remove `parallelize=True` from --model_args or launch outside of the Accelerate launcher." + ) + else: + pass + elif accelerator.num_processes == 1: + # if we aren't launching via accelerate, ditch + self._rank = 0 + self._world_size = 1 + else: + if gpus > accelerator.num_processes: + eval_logger.warning( + "WARNING: The number of total system GPUs does not match the number of spawned processes. " + "If you would like to use data parallelism, please launch the script " + "with 'accelerate launch *script*'. " + f"Current run will proceed with {accelerator.num_processes} devices." + ) + assert ( + accelerator.distributed_type + in [ + DistributedType.FSDP, + DistributedType.MULTI_GPU, + ] + ), "Unsupported distributed type provided. Only DDP and FSDP are supported." 
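+                    # Illustrative note: this branch is reached when evaluation is started
+                    # with `accelerate launch <script>` (as the warning above suggests); each
+                    # rank then wraps the model below via accelerator.prepare (FSDP) or
+                    # accelerator.prepare_model(..., evaluation_mode=True) and pins itself to
+                    # cuda:<local_process_index>, giving plain data parallelism over requests.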
+ if accelerator.distributed_type == DistributedType.FSDP: + self._model = accelerator.prepare(self.model) + else: + self._model = accelerator.prepare_model( + self.model, evaluation_mode=True + ) + self._device = torch.device( + f"cuda:{accelerator.local_process_index}" + ) + self.accelerator = accelerator + + if self.accelerator.is_local_main_process: + eval_logger.info(f"Using {gpus} devices with data parallelism") + + self._rank = self.accelerator.local_process_index + self._world_size = self.accelerator.num_processes + else: + # if a PreTrainedModel was passed into HFLM, we forgo distributed setup. + eval_logger.warning( + "Passed an already-initialized model through `pretrained`, assuming single-process call to evaluate() or custom distributed integration" + ) + self._rank = 0 + self._world_size = 1 + + self.custom_prefix_token_id = prefix_token_id + if prefix_token_id is not None: + eval_logger.info( + f"Loglikelihood prefix token id used in evaluation: {self.prefix_token_id}" + ) + + @property + def config(self): + # return the associated transformers.AutoConfig for the given pretrained model. + return self._config + + @property + def model(self): + # returns the model, unwrapping it if using Accelerate + if hasattr(self, "accelerator"): + return self.accelerator.unwrap_model(self._model) + else: + return self._model + + @property + def eot_token_id(self): + # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence* + return self.tokenizer.eos_token_id + + @property + def prefix_token_id(self): + # it is used as prefix for loglikelihood + if self.custom_prefix_token_id is not None: + return self.custom_prefix_token_id + if self.tokenizer.bos_token_id is not None: + return self.tokenizer.bos_token_id + return self.tokenizer.eos_token_id + + @property + def max_length(self): + if self._max_length: # if max length manually set, return it + return self._max_length + seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx") + for attr in seqlen_config_attrs: + if hasattr(self.model.config, attr): + return getattr(self.model.config, attr) + if hasattr(self.tokenizer, "model_max_length"): + if self.tokenizer.model_max_length == 1000000000000000019884624838656: + return self._DEFAULT_MAX_LENGTH + return self.tokenizer.model_max_length + return self._DEFAULT_MAX_LENGTH + + @property + def max_gen_toks(self) -> int: + return 256 + + @property + def batch_size(self): + return self.batch_size_per_gpu + + @property + def device(self): + return self._device + + @property + def rank(self): + return self._rank + + @property + def world_size(self): + return self._world_size + + def _get_backend( + self, + config: Union[transformers.PretrainedConfig, transformers.AutoConfig], + backend: Optional[Literal["default", "causal", "seq2seq"]] = "default", + trust_remote_code: Optional[bool] = False, + ) -> None: + """ + Helper method during initialization. + Determines the backend ("causal" (decoder-only) or "seq2seq" (encoder-decoder)) + model type to be used. 
+ """ + assert backend in ["default", "causal", "seq2seq"] + + if backend != "default": + # if we've settled on non-default backend, use that manually + if backend == "causal": + self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM + elif backend == "seq2seq": + self.AUTO_MODEL_CLASS = transformers.AutoModelForSeq2SeqLM + eval_logger.info( + f"Overrode HF model backend type, and using type '{backend}'" + ) + else: + # determine and use the default HF backend for this model, based on its config + metadata. + if ( + getattr(config, "model_type") + in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES + ): + # first check if model type is listed under seq2seq models, since some + # models like MBart are listed in both seq2seq and causal mistakenly in HF transformers. + # these special cases should be treated as seq2seq models. + self.AUTO_MODEL_CLASS = transformers.AutoModelForSeq2SeqLM + elif ( + getattr(self.config, "model_type") in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES + ): + self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM + else: + if not trust_remote_code: + eval_logger.warning( + "HF model type is neither marked as CausalLM or Seq2SeqLM. \ + This is expected if your model requires `trust_remote_code=True` but may be an error otherwise." + ) + # if model type is neither in HF transformers causal or seq2seq model registries + # then we default to AutoModelForCausalLM + self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM + + assert self.AUTO_MODEL_CLASS in [ + transformers.AutoModelForCausalLM, + transformers.AutoModelForSeq2SeqLM, + ] + return None + + def _get_config( + self, + pretrained: str, + revision: str = "main", + trust_remote_code: bool = False, + ) -> None: + self._config = transformers.AutoConfig.from_pretrained( + pretrained, + revision=revision, + trust_remote_code=trust_remote_code, + ) + + def _create_model( + self, + pretrained: str, + revision: Optional[str] = "main", + dtype: Optional[Union[str, torch.dtype]] = "auto", + trust_remote_code: Optional[bool] = False, + # arguments used for splitting a model across GPUs naively. + # only used if `parallelize=True`. + # (accelerate naive PP (device_map) options) + parallelize: Optional[bool] = False, + device_map_option: Optional[str] = "auto", + max_memory_per_gpu: Optional[Union[int, str]] = None, + max_cpu_memory: Optional[Union[int, str]] = None, + offload_folder: Optional[str] = "./offload", + # PEFT, delta weights and quantization options + peft: Optional[str] = None, + delta: Optional[str] = None, + autogptq: Optional[Union[bool, str]] = False, + **kwargs, + ) -> None: + """ + Initializes an HF or HF-compatible PreTrainedModel from scratch + inside HFLM, using the kwargs passed into self.__init__(). + + Also handles functionality such as AutoGPTQ usage and PEFT wrapping. + + For future similar extensions to AutoGPTQ that are not core to HF's ecosystem, + (such as PyTorch models that are nearly, but not quite, fully mirroring + HF's public interface relied on in this HFLM class) + please consider subclassing HFLM and overriding this and other methods as needed. + """ + + model_kwargs = kwargs if kwargs else {} + + if parallelize: + model_kwargs.update( + _get_accelerate_args( + device_map_option, # TODO: phase out device_map_option? + max_memory_per_gpu, + max_cpu_memory, + offload_folder, + ) + ) + elif "device_map" not in model_kwargs: + # set a device_map to initialize model on the right GPU. 
+ # this is needed because it seems that the default behavior + # for quantized models now seems to be device_map="auto" + # which breaks data-parallel mode. + if hasattr(self, "accelerator"): + model_kwargs.update( + {"device_map": {"": f"cuda:{self.accelerator.local_process_index}"}} + ) + else: + model_kwargs.update({"device_map": {"": str(self.device)}}) + + if not autogptq: + if model_kwargs.get("load_in_4bit", None): + assert ( + transformers.__version__ >= "4.30.0" + ), "load_in_4bit requires transformers >= 4.30.0" + if transformers.__version__ >= "4.30.0": + if model_kwargs.get("load_in_4bit", None): + if model_kwargs.get("bnb_4bit_compute_dtype", None): + model_kwargs["bnb_4bit_compute_dtype"] = get_dtype( + model_kwargs["bnb_4bit_compute_dtype"] + ) + self._model = self.AUTO_MODEL_CLASS.from_pretrained( + pretrained, + revision=revision, + torch_dtype=get_dtype(dtype), + trust_remote_code=trust_remote_code, + **model_kwargs, + ) + else: + try: + from auto_gptq import AutoGPTQForCausalLM + except ModuleNotFoundError: + raise Exception( + "Tried to load auto_gptq, but auto-gptq is not installed ", + "please install auto-gptq via pip install lm-eval[gptq] or pip install -e .[gptq]", + ) + + self._model = AutoGPTQForCausalLM.from_quantized( + pretrained, + trust_remote_code=trust_remote_code, + model_basename=None if autogptq is True else Path(autogptq).stem, + use_safetensors=True + if autogptq is True + else autogptq.endswith(".safetensors"), + **model_kwargs, + ) + + if peft and delta: + raise ValueError( + "Cannot use both 'peft' and 'delta' options at the same time." + ) + + if peft: + if model_kwargs.get("load_in_4bit", None): + if version.parse(PEFT_VERSION) < version.parse("0.4.0"): + raise AssertionError("load_in_4bit requires peft >= 0.4.0") + self._model = PeftModel.from_pretrained( + self._model, peft, revision=revision + ) + elif delta: + if autogptq: + eval_logger.warning( + "Delta weights might trigger unexpected behavior when used with AutoGPTQ." + ) + _model_delta = self.AUTO_MODEL_CLASS.from_pretrained( + delta, + revision=revision, + torch_dtype=get_dtype(dtype), + trust_remote_code=trust_remote_code, + **model_kwargs, + ) + for name, param in self._model.state_dict().items(): + try: + param.data += _model_delta.state_dict()[name] + except KeyError: + raise KeyError(f"Delta model is missing weights for layer: {name}") + except Exception as e: + raise RuntimeError( + f"Failed to add delta weights to layer {name}. Error: {e}" + ) + + del _model_delta + + return None + + def _create_tokenizer( + self, + pretrained: Union[str, transformers.PreTrainedModel], + tokenizer: Optional[ + Union[ + str, + transformers.PreTrainedTokenizer, + transformers.PreTrainedTokenizerFast, + ] + ], + revision: Optional[str] = "main", + trust_remote_code: Optional[bool] = False, + use_fast_tokenizer: Optional[bool] = True, + ) -> None: + """ + Helper method during initialization. + + Create a tokenizer object corresponding to the correct + tokenizer for value of `pretrained`, or use the pre-initialized tokenizer passed. 
+ """ + + if tokenizer: + if isinstance(tokenizer, str): + self.tokenizer = transformers.AutoTokenizer.from_pretrained( + tokenizer, + revision=revision, + trust_remote_code=trust_remote_code, + use_fast=use_fast_tokenizer, + ) + else: + assert isinstance( + tokenizer, transformers.PreTrainedTokenizer + ) or isinstance(tokenizer, transformers.PreTrainedTokenizerFast) + self.tokenizer = tokenizer + else: + # Get tokenizer based on 'pretrained' + if isinstance(pretrained, str): + model_name = pretrained + else: + # get the HF hub name via accessor on model + model_name = self.model.name_or_path + self.tokenizer = transformers.AutoTokenizer.from_pretrained( + model_name, + revision=revision, + trust_remote_code=trust_remote_code, + use_fast=use_fast_tokenizer, + ) + return None + + def _detect_batch_size(self, requests=None, pos: int = 0): + if requests: + _, context_enc, continuation_enc = requests[pos] + max_length = len( + (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1] + ) + max_context_enc = len(context_enc[-(self.max_length + 1) :]) + max_cont_enc = len(continuation_enc[-(self.max_length + 1) :]) + else: + max_length = self.max_length + + # if OOM, then halves batch_size and tries again + @find_executable_batch_size(starting_batch_size=self.max_batch_size) + def forward_batch(batch_size): + if self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM: + length = max(max_context_enc, max_cont_enc) + batched_conts = torch.ones( + (batch_size, length), device=self.device + ).long() + test_batch = torch.ones((batch_size, length), device=self.device).long() + call_kwargs = { + "attn_mask": test_batch, + "labels": batched_conts, + } + else: + call_kwargs = {} + test_batch = torch.ones( + (batch_size, max_length), device=self.device + ).long() + for _ in range(5): + out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1) # noqa: F841 + + return batch_size + + try: + batch_size = forward_batch() + except RuntimeError as e: + if "No executable batch size found" in str(e): + batch_size = 1 + else: + raise + + if self.world_size > 1: + # if multi-GPU, always take minimum over all selected batch sizes + max_rnk_bs = torch.tensor([batch_size], device=self.device) + gathered = ( + self.accelerator.gather(max_rnk_bs).cpu().detach().numpy().tolist() + ) + batch_size = min(gathered) + clear_torch_cache() + return batch_size + + clear_torch_cache() + return batch_size + + def tok_encode( + self, string: str, left_truncate_len=None, add_special_tokens=None + ) -> List[int]: + """ """ + # default for None - empty dict, use predefined tokenizer param + # used for all models except for CausalLM or predefined value + special_tokens_kwargs = {} + + # by default for CausalLM - false or self.add_bos_token is set + if add_special_tokens is None: + if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM: + special_tokens_kwargs = { + "add_special_tokens": False or self.add_bos_token + } + # otherwise the method explicitly defines the value + else: + special_tokens_kwargs = {"add_special_tokens": add_special_tokens} + + encoding = self.tokenizer.encode(string, **special_tokens_kwargs) + + # left-truncate the encoded context to be at most `left_truncate_len` tokens long + if left_truncate_len: + encoding = encoding[-left_truncate_len:] + + return encoding + + def tok_batch_encode( + self, + strings: List[str], + padding_side: str = "left", + left_truncate_len: int = None, + truncation: bool = False, + ) -> Tuple[torch.Tensor, torch.Tensor]: + # encode a batch of strings. 
converts to tensors and pads automatically, unlike tok_encode. + old_padding_side = self.tokenizer.padding_side + self.tokenizer.padding_side = padding_side + + add_special_tokens = {} + if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM: + add_special_tokens = {"add_special_tokens": False or self.add_bos_token} + + encoding = self.tokenizer( + strings, + truncation=truncation, + padding="longest", + return_tensors="pt", + **add_special_tokens, + ) + if left_truncate_len: + encoding["input_ids"] = encoding["input_ids"][:, -left_truncate_len:] + encoding["attention_mask"] = encoding["attention_mask"][ + :, -left_truncate_len: + ] + self.tokenizer.padding_side = old_padding_side + + return encoding["input_ids"], encoding["attention_mask"] + + def tok_decode(self, tokens, skip_special_tokens=True): + return self.tokenizer.decode(tokens, skip_special_tokens=skip_special_tokens) + + def _model_call(self, inps, attn_mask=None, labels=None): + """ + :param inps: torch.Tensor + A torch tensor of shape [batch, (sequence_ctx + sequence_cont)] or of shape + [batch, sequence_ctx]. the size of sequence may vary from call to call + :param attn_mask: torch.Tensor, optional + A torch tensor of shape [batch, (sequence_ctx + sequence_cont)]. Only passed + (and must be passed) if self.AUTO_MODEL_CLASS is transformers.AutoModelForSeq2SeqLM + :param labels: torch.Tensor, optional + A torch tensor of shape [batch, (sequence_ctx + sequence_cont)]. Only passed + (and must be passed) if self.AUTO_MODEL_CLASS is transformers.AutoModelForSeq2SeqLM + :return + A torch tensor of shape [batch, sequence, vocab] with the + logits returned from the model's decoder + """ + with torch.no_grad(): + if attn_mask is not None or labels is not None: + assert attn_mask is not None and labels is not None + assert self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM + return self.model( + input_ids=inps, attention_mask=attn_mask, labels=labels + ).logits + else: + assert self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM + return self.model(inps).logits + + def _model_generate(self, context, max_length, stop, **generation_kwargs): + # temperature = 0.0 if not set + # if do_sample is false and temp==0.0: + # remove temperature, as do_sample=False takes care of this + # and we don't want a warning from HF + generation_kwargs["temperature"] = generation_kwargs.get("temperature", 0.0) + do_sample = generation_kwargs.get("do_sample", None) + + # The temperature has to be a strictly positive float -- if it is 0.0, use greedy decoding strategies + if generation_kwargs.get("temperature") == 0.0 and do_sample is None: + generation_kwargs["do_sample"] = do_sample = False + + if do_sample is False and generation_kwargs.get("temperature") == 0.0: + generation_kwargs.pop("temperature") + # build stopping criteria + stopping_criteria = stop_sequences_criteria( + self.tokenizer, stop, context.shape[1], context.shape[0] + ) + return self.model.generate( + input_ids=context, + max_length=max_length, + stopping_criteria=stopping_criteria, + pad_token_id=self.tokenizer.pad_token_id, + use_cache=True, + **generation_kwargs, + ) + + def _select_cont_toks( + self, logits: torch.Tensor, contlen: int = None, inplen: int = None + ) -> torch.Tensor: + if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM: + assert ( + contlen and inplen + ), "Must pass input len and cont. len to select scored logits for causal LM" + # discard right-padding. + # also discard the input/context tokens. we'll only score continuations. 
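+            # Worked example with hypothetical sizes (ignoring batch padding): for 5
+            # context tokens and 3 continuation tokens the causal input was
+            # context + continuation[:-1], so inplen = 7 and contlen = 3, and
+            # logits[7 - 3 : 7] == logits[4:7] keeps exactly the 3 positions whose
+            # next-token predictions are scored against the continuation.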
+ logits = logits[inplen - contlen : inplen] + elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM: + assert ( + contlen and not inplen + ), "Selecting scored logits for Seq2SeqLM requires only cont. len" + # only discard right-padding. + # the logits input to this fn only contain decoder-side tokens. + logits = logits[:contlen] + + return logits + + def loglikelihood_rolling( + self, requests: List[Instance], disable_tqdm: bool = False + ) -> List[float]: + loglikelihoods = [] + + adaptive_batch_size = None + if self.batch_size == "auto": + # using rolling window with maximum context + print("Passed argument batch_size = auto. Detecting largest batch size") + batch_size = self._detect_batch_size() + print(f"Determined Largest batch size: {batch_size}") + adaptive_batch_size = batch_size + + for (string,) in tqdm( + [req.args for req in requests], disable=(disable_tqdm or (self.rank != 0)) + ): + rolling_token_windows = list( + map( + utils.make_disjoint_window, + utils.get_rolling_token_windows( + token_list=self.tok_encode(string), + prefix_token=self.prefix_token_id, + max_seq_len=self.max_length, + context_len=1, + ), + ) + ) + + # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case + rolling_token_windows = [(None,) + x for x in rolling_token_windows] + + pad_amnt = 0 + if self.world_size > 1: + # We pad out the external document-level iterator so the inner iterator doesn't hang + mytensor = torch.tensor(len(rolling_token_windows), device=self.device) + gathered = ( + self.accelerator.gather(mytensor).cpu().detach().numpy().tolist() + ) + + pad_amnt = max(gathered) - gathered[self.rank] + if pad_amnt > 0: + rolling_token_windows += pad_amnt * [rolling_token_windows[0]] + + string_nll = self._loglikelihood_tokens( + requests=rolling_token_windows, + disable_tqdm=True, + override_bs=adaptive_batch_size, + ) + + if (self.world_size > 1) and (pad_amnt > 0): + string_nll = [x[0] for x in string_nll[:-pad_amnt]] + else: + # discard is_greedy + string_nll = [x[0] for x in string_nll] + + string_nll = sum(string_nll) + loglikelihoods.append(string_nll) + + return loglikelihoods + + def _batch_scheduler(self, pos, n_reordered_requests): + sched = pos // int(len(n_reordered_requests) / self.batch_schedule) + if sched in self.batch_sizes: + return self.batch_sizes[sched] + if (len(self.batch_sizes) > 1) and ( + self.batch_sizes[sched - 1] == self.max_batch_size + ): + # if previous batch size is already maximal, skip recomputation + self.batch_sizes[sched] = self.max_batch_size + return self.batch_sizes[sched] + print( + f"Passed argument batch_size = auto:{self.batch_schedule}. 
Detecting largest batch size" + ) + self.batch_sizes[sched] = self._detect_batch_size(n_reordered_requests, pos) + print(f"Determined largest batch size: {self.batch_sizes[sched]}") + return self.batch_sizes[sched] + + def _loglikelihood_tokens( + self, + requests: List[Tuple[Tuple[str, str], List[int], List[int]]], + disable_tqdm: bool = False, + override_bs: int = None, + ) -> List[Tuple[float, bool]]: + # TODO: implement some kind of efficient-request-middleware that lumps together requests with the same context + res = [] + + def _collate(req: Tuple[Tuple[str, str], List[int], List[int]]): + """Defines the key for the sorted method""" + # the negative sign on len(toks) sorts descending - this has a few advantages: + # - time estimates will always be over not underestimates, which is more useful for planning + # - to know the size of a batch when going through the list, you know the first one is always the batch + # padded context length. this is useful to simplify the batching logic and more importantly to make + # automatic adaptive batches much much easier to implement + # - any OOMs will happen right away rather than near the end + + toks = req[1] + req[2] + return -len(toks), tuple(toks) + + def _lookup_one_token_cont(req: Tuple[Tuple[str, str], List[int], List[int]]): + """Defines the key to group and lookup one-token continuations""" + # Use with group_by="contexts" (optional)" + # allows for the creation of a lookup, so we can reuse logits in case of one-token continuations. + # speeds up some multiple-choice tasks proportionally to the number of choices. + # groups requests by context+continuation[:-1] and infer on one request/group. + return req[-2] + req[-1][:-1] + + re_ord = Collator( + requests, + sort_fn=_collate, + group_by="contexts" + if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM + and self.logits_cache + else None, + group_fn=_lookup_one_token_cont, + ) + + # automatic (variable) batch size detection for vectorization + # pull longest context sample from request + n_reordered_requests = len(re_ord) + batch_size = ( + self.batch_size + if self.batch_size != "auto" + else override_bs + if override_bs is not None + else 0 + ) + batch_fn = ( + self._batch_scheduler + if self.batch_size == "auto" + and n_reordered_requests > 0 + and not override_bs + else None + ) + + chunks = re_ord.get_batched(n=batch_size, batch_fn=batch_fn) + pbar = tqdm( + total=len(requests), + disable=(disable_tqdm or (self.rank != 0)), + desc="Running loglikelihood requests", + ) + for chunk in chunks: + inps = [] + cont_toks_list = [] + inplens = [] + + conts = [] + encoder_attns = [] + + padding_len_inp = None + padding_len_cont = None + # because vectorizing is annoying, we first convert each (context, continuation) pair to padded + # tensors, then we pack them together into a batch, call the model, and then pick it all apart + # again because vectorizing is annoying + + for _, context_enc, continuation_enc in chunk: + # sanity check + assert len(context_enc) > 0 + assert len(continuation_enc) > 0 + assert len(continuation_enc) <= self.max_length + + # how this all works (illustrated on a causal decoder-only setup): + # CTX CONT + # inp 0 1 2 3|4 5 6 7 8 9 <- last token is deleted by inp[:, :-1] + # model \ \ + # logits 1 2 3|4 5 6 7 8 9 <- the ctx half gets tossed out by the + # cont_toks 4 5 6 7 8 9 [:, -len(continuation_enc):, :self.vocab_size] slice + + # when too long to fit in context, truncate from the left + if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM: + 
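+                    # e.g. (hypothetical numbers) with max_length = 2048 and a combined
+                    # context + continuation of 2050 tokens, [-(2048 + 1):] keeps the last
+                    # 2049 tokens and [:-1] drops the final one, leaving a 2048-token input
+                    # truncated from the left so the continuation is still fully scored.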
inp = torch.tensor( + (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1], + dtype=torch.long, + device=self.device, + ) + (inplen,) = inp.shape + elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM: + inp = torch.tensor( + (context_enc)[-self.max_length :], + dtype=torch.long, + device=self.device, + ) + (inplen,) = inp.shape + + # build encoder attn masks + encoder_attns.append(torch.ones_like(inp)) + + cont = torch.tensor( + (continuation_enc)[-self.max_length :], + # TODO: left-shift these? + # TODO: our code assumes we never end up truncating conts for either model type + dtype=torch.long, + device=self.device, + ) + (contlen,) = cont.shape + + conts.append(cont) + + padding_len_cont = ( + max(padding_len_cont, contlen) + if padding_len_cont is not None + else contlen + ) + + padding_len_inp = ( + max(padding_len_inp, inplen) + if padding_len_inp is not None + else inplen + ) + + inps.append(inp) # [1, inp_length] + cont_toks_list.append(continuation_enc) + inplens.append(inplen) + + # create encoder attn mask and batched conts, if seq2seq + call_kwargs = {} + if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM: + batched_inps = pad_and_concat( + padding_len_inp, inps, padding_side="right" + ) # [batch, padding_len_inp] + elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM: + # TODO: left-pad encoder inps and mask? + batched_inps = pad_and_concat( + padding_len_inp, inps + ) # [batch, padding_len_inp] + batched_conts = pad_and_concat( + padding_len_cont, conts + ) # [batch, padding_len_cont] + batched_encoder_mask = pad_and_concat( + padding_len_inp, encoder_attns + ) # [batch, padding_len_inp] + call_kwargs = { + "attn_mask": batched_encoder_mask, + "labels": batched_conts, + } + + multi_logits = F.log_softmax( + self._model_call(batched_inps, **call_kwargs), dim=-1 + ) # [batch, padding_length (inp or cont), vocab] + + for (request_str, ctx_tokens, _), logits, inplen, cont_toks in zip( + chunk, multi_logits, inplens, cont_toks_list + ): + # Slice to original seq length + contlen = len(cont_toks) + # take only logits in the continuation + # (discard context toks if decoder-only ; discard right-padding) + # also discards + checks for "virtual tokens" in the causal LM's input window + # from prompt/prefix tuning tokens, if applicable + ctx_len = ( + inplen + (logits.shape[0] - padding_len_inp) + if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM + else None + ) + logits = self._select_cont_toks(logits, contlen=contlen, inplen=ctx_len) + logits = logits.unsqueeze(0) # [1, seq, vocab] + + # Check if per-token argmax is exactly equal to continuation + greedy_tokens = logits.argmax(dim=-1) + + # check for one-token continuation cache hits. + # noop in case group_by != "contexts" or no cache hit and returns the + # original args. Otherwise, expands the logits batch dimension and yields each + # batch along with matching continuation tokens and prompt strings. 
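+                # e.g. (illustrative) a 4-way multiple-choice doc whose answer choices are
+                # the single tokens " A", " B", " C", " D" produces four requests sharing
+                # context + continuation[:-1] (empty for one-token continuations), so the
+                # logits from one forward pass over the shared context are re-used for all
+                # four answers here.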
+ # logits -> [1, seq, vocab] + for request_str, cont_toks, logits in re_ord.get_cache( + req_str=request_str, + cxt_toks=ctx_tokens, + cont_toks=cont_toks, + logits=logits, + ): + cont_toks = torch.tensor( + cont_toks, dtype=torch.long, device=self.device + ).unsqueeze(0) # [1, seq] + max_equal = (greedy_tokens == cont_toks).all() + + # Obtain log-probs at the corresponding continuation token indices + # last_token_slice = logits[:, -1, :].squeeze(0).tolist() + logits = torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze( + -1 + ) # [1, seq] + + # Answer: (log prob, is-exact-match) + answer = (float(logits.sum()), bool(max_equal)) + + res.append(answer) + + self.cache_hook.add_partial("loglikelihood", request_str, answer) + pbar.update(1) + + pbar.close() + + return re_ord.get_original(res) + + def generate_until( + self, requests: List[Instance], disable_tqdm: bool = False + ) -> List[str]: + res = [] + + def _collate(req: Tuple[str, dict]): + """Defines the key for the sorted method""" + # the negative sign on len(toks) sorts descending - this has a few advantages: + # - time estimates will always be over not underestimates, which is more useful for planning + # - to know the size of a batch when going through the list, you know the first one is always the batch + # padded context length. this is useful to simplify the batching logic and more importantly to make + # automatic adaptive batches much much easier to implement + # - any OOMs will happen right away rather than near the end + toks = self.tok_encode(req[0]) + return -len(toks), req[0] + + pbar = tqdm( + total=len(requests), + disable=(disable_tqdm or (self.rank != 0)), + desc="Running generate_until requests", + ) + adaptive_batch_size = None + if self.batch_size == "auto": + # using rolling window with maximum context + print("Passed argument batch_size = auto. Detecting largest batch size") + batch_size = self._detect_batch_size() + print(f"Determined Largest batch size: {batch_size}") + adaptive_batch_size = batch_size + # for each different set of kwargs, we execute all requests, by batch. + batch_size = ( + self.batch_size + if self.batch_size != "auto" + else adaptive_batch_size + if adaptive_batch_size is not None + else 0 + ) + batch_fn = ( + self._batch_scheduler + if self.batch_size == "auto" and not adaptive_batch_size + else None + ) + + # we group requests by their generation_kwargs, + # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling + # in the same batch. + # group_fn=lambda x: x[1] -> x=(context, gen_kwargs) + re_ords = Collator( + [reg.args for reg in requests], + sort_fn=_collate, + group_by="gen_kwargs", + group_fn=lambda x: x[1], + ) + chunks = re_ords.get_batched(n=batch_size, batch_fn=batch_fn) + for chunk in chunks: + contexts, all_gen_kwargs = zip(*chunk) + # we assume all gen kwargs in the batch are the same + # this is safe to assume because the `grouper` object ensures it. + gen_kwargs = all_gen_kwargs[0] + # unpack our keyword arguments. 
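+            # e.g. (hypothetical values) gen_kwargs for a batch might look like
+            #   {"until": ["\n\n"], "do_sample": False, "temperature": 0.0, "max_gen_toks": 256}
+            # "until" and "max_gen_toks" are popped off below; whatever remains is forwarded
+            # to self._model_generate() as HuggingFace generate() kwargs.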
+ until = None + if isinstance(gen_kwargs, dict): + kwargs = copy.deepcopy(gen_kwargs) # edge case for repeats > 1 + if "until" in kwargs.keys(): + until = kwargs.pop("until") + if isinstance(until, str): + until = [until] + elif not isinstance(until, list): + raise ValueError( + f"Expected `kwargs['until']` to be of type Union[str,list] but got {until}" + ) + else: + raise ValueError( + f"Expected `kwargs` to be of type `dict` but got {type(gen_kwargs)}" + ) + # add EOS token to stop sequences + eos = self.tok_decode(self.eot_token_id, skip_special_tokens=False) + if not until: + until = [eos] + else: + until.append(eos) + if "max_gen_toks" in kwargs.keys(): + max_gen_toks = kwargs.pop("max_gen_toks") + else: + max_gen_toks = self.max_gen_toks + + # set the max length in tokens of inputs ("context_enc") + if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM: + # max len for inputs = max length, minus room to generate the max new tokens + max_ctx_len = self.max_length - max_gen_toks + elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM: + # max len for inputs = encoder's whole max_length + max_ctx_len = self.max_length + + # encode, pad, and truncate contexts for this batch + context_enc, attn_masks = self.tok_batch_encode( + contexts, + left_truncate_len=max_ctx_len, + truncation=self.truncation, + ) + context_enc = context_enc.to(self.device) + attn_masks = attn_masks.to(self.device) + + if "max_length" not in kwargs: + kwargs["max_length"] = context_enc.shape[1] + max_gen_toks + + # perform batched generation + cont = self._model_generate( + context=context_enc, + attention_mask=attn_masks, + stop=until, + **kwargs, + ) + + cont_toks_list = cont.tolist() + for cont_toks, context in zip(cont_toks_list, contexts): + # discard context + left-padding toks if using causal decoder-only LM + if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM: + cont_toks = cont_toks[context_enc.shape[1] :] + + s = self.tok_decode(cont_toks) + + # use secondary stop seqs to cut off should-have-been-stopped content post-hoc + for term in until: + if len(term) > 0: + # ignore '' separator, + # for seq2seq case where self.tok_decode(self.eot_token_id) = '' + s = s.split(term)[0] + + res.append(s) + + self.cache_hook.add_partial("generate_until", (context, gen_kwargs), s) + pbar.update(1) + # reorder this group of results back to original unsorted form + res = re_ords.get_original(res) + + pbar.close() + + return res diff --git a/lm-evaluation-harness/lm_eval/models/mamba_lm.py b/lm-evaluation-harness/lm_eval/models/mamba_lm.py new file mode 100644 index 0000000000000000000000000000000000000000..cd9049836838a1dabb2baf383f8e8ce5a02e7391 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/models/mamba_lm.py @@ -0,0 +1,126 @@ +from typing import Optional, Union + +import torch + +import lm_eval.models.utils +from lm_eval.api.registry import register_model +from lm_eval.models.huggingface import HFLM + + +@register_model("mamba_ssm") +class MambaLMWrapper(HFLM): + def __init__( + self, + pretrained="state-spaces/mamba-130m", + **kwargs, + ) -> None: + """ + Mamba (via the `mamba_ssm` package) supports the following args: + ``` + d_model: int, + n_layer: int, + vocab_size: int, + initializer_cfg=None, + pad_vocab_size_multiple: int = 1, + ssm_cfg=None, + norm_epsilon: float = 1e-5, + rms_norm: bool = False, + initializer_cfg=None, + fused_add_norm=False, + residual_in_fp32=False, + ``` + + See https://github.com/state-spaces/mamba/blob/main/mamba_ssm/models/mixer_seq_simple.py#L175 for 
more info. + The above can all be passed via `--model_args` or to this __init__() directly + but we recommend placing many of these within the config.json file uploaded alongside your + Mamba model to the HF Hub instead. + All other HuggingFace from_pretrained() kwargs + such as those related to + `parallelize=True`, PEFT, autoGPTQ, + or any sub-configurations of these advanced args, + are unsupported by the `mamba_ssm` package. + + The HFLM arguments + + `backend`, `tokenizer`, `truncation`, `max_length`, + `device`, `dtype`, `batch_size`, `max_batch_size`, `trust_remote_code`, `use_fast_tokenizer` + + Are all supported by Mamba where they do not conflict + with Mamba-specific restrictions such as causal LMs only. + """ + + if "backend" in kwargs: + # mamba currently only supports causal models + assert kwargs["backend"] == "causal" + + super().__init__( + pretrained=pretrained, + # set appropriate defaults for tokenizer, max length, etc + backend=kwargs.pop("backend", "causal"), + tokenizer=kwargs.pop("tokenizer", "EleutherAI/gpt-neox-20b"), + max_length=kwargs.pop("max_length", 2048), + **kwargs, + ) + + def _get_config( + self, + pretrained: str, + **kwargs, + ) -> None: + try: + from mamba_ssm.utils.hf import load_config_hf # noqa: F811 + except ModuleNotFoundError: + raise Exception( + "attempted to use 'mamba_ssm' LM type, but package `mamba_ssm` is not installed. \ +please install mamba via `pip install lm-eval[mamba]` or `pip install -e .[mamba]`", + ) + + self._config = load_config_hf(pretrained) + + def _create_model( + self, + pretrained: str, + dtype: Optional[Union[str, torch.dtype]] = "float16", + # no `parallelize=True` options + # no PEFT and quantization options + # Mamba does not support arbitrary HF from_pretrained() args + **kwargs, + ) -> None: + try: + from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel # noqa: F811 + except ModuleNotFoundError: + raise Exception( + "attempted to use 'mamba_ssm' LM type, but package `mamba_ssm` is not installed. \ +please install mamba via `pip install lm-eval[mamba]` or `pip install -e .[mamba]`", + ) + + self._model = MambaLMHeadModel.from_pretrained( + pretrained, + device=self._device, + dtype=torch.float16 + if dtype == "auto" + else lm_eval.models.utils.get_dtype(dtype), + ) + + def _model_generate(self, context, max_length, stop, **generation_kwargs): + for key in ("do_sample", "attention_mask"): + if key in generation_kwargs: + generation_kwargs.pop(key) + + # mamba's custom GenerationMixin currently does not support + # passing stopping criteria. + # for the time being, we simply generate to max length, + # then truncate (equivalent result) + # -- this should be revisited to speed up generation + # stopping_criteria = stop_sequences_criteria( + # self.tokenizer, stop, 1, context.shape[0] + # ) + + return self.model.generate( + input_ids=context, + max_length=max_length, + # stopping_criteria=stopping_criteria, + # pad_token_id=self.tokenizer.pad_token_id, + # use_cache=True, + **generation_kwargs, + ) diff --git a/lm-evaluation-harness/lm_eval/models/nemo_lm.py b/lm-evaluation-harness/lm_eval/models/nemo_lm.py new file mode 100644 index 0000000000000000000000000000000000000000..cb1aca1eec18a05725ffb29e15f633078cab699b --- /dev/null +++ b/lm-evaluation-harness/lm_eval/models/nemo_lm.py @@ -0,0 +1,537 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib +import pathlib +from copy import deepcopy +from typing import List, Literal + +import filelock +import numpy as np +import torch +from tqdm import tqdm + +from lm_eval.api.instance import Instance +from lm_eval.api.model import LM +from lm_eval.api.registry import register_model +from lm_eval.models.utils import Collator +from lm_eval.utils import ( + eval_logger, + get_rolling_token_windows, + make_disjoint_window, + simple_parse_args_string, +) + + +def _patch_pretrained_cfg( + pretrained_cfg, trainer, tensor_model_parallel_size, pipeline_model_parallel_size +): + try: + import omegaconf + except ModuleNotFoundError: + raise Exception( + "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed" + "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, " + "or installing nemo following https://github.com/NVIDIA/NeMo.", + ) + + omegaconf.OmegaConf.set_struct(pretrained_cfg, True) + with omegaconf.open_dict(pretrained_cfg): + attributes_to_update = { + "sequence_parallel": False, + "activations_checkpoint_granularity": None, + "activations_checkpoint_method": None, + "precision": trainer.precision, + "global_batch_size": None, + "tensor_model_parallel_size": tensor_model_parallel_size, + "pipeline_model_parallel_size": pipeline_model_parallel_size, + "apply_rope_fusion": False, + } + for name, value in attributes_to_update.items(): + if hasattr(pretrained_cfg, name): + pretrained_cfg[name] = value + return pretrained_cfg + + +def _get_target_from_class(target_class) -> str: + return f"{target_class.__module__}.{target_class.__name__}" + + +def load_model( + model_path: str, + trainer, + tensor_model_parallel_size: int, + pipeline_model_parallel_size: int, +) -> torch.nn.Module: + try: + from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import ( + MegatronGPTModel, + ) + from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector + except ModuleNotFoundError: + raise Exception( + "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed" + "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, " + "or installing nemo following https://github.com/NVIDIA/NeMo.", + ) + model_path = pathlib.Path(model_path) + + save_restore_connector = NLPSaveRestoreConnector() + if model_path.is_dir(): + save_restore_connector.model_extracted_dir = model_path.as_posix() + pretrained_cfg = save_restore_connector.restore_from( + None, model_path.as_posix(), return_config=True, trainer=trainer + ) + if not hasattr(pretrained_cfg, "target"): + pretrained_cfg["target"] = _get_target_from_class(MegatronGPTModel) + + pretrained_cfg = _patch_pretrained_cfg( + pretrained_cfg, + trainer, + tensor_model_parallel_size=tensor_model_parallel_size, + pipeline_model_parallel_size=pipeline_model_parallel_size, + ) + + model_to_load_path = model_path + override_config = pretrained_cfg + + module_name, class_name = override_config.target.rsplit(".", 1) + model_class = getattr(importlib.import_module(module_name), 
class_name) + + # monkeypatch _build_tokenizer method to be process-safe + tokenizer_lock = filelock.FileLock(f"/tmp/{model_path.name}.tokenizer.lock") + + def _synced_build_tokenizer(self): + with tokenizer_lock: + self._original_build_tokenizer() + + model_class._original_build_tokenizer = model_class._build_tokenizer + model_class._build_tokenizer = _synced_build_tokenizer + + model = model_class.restore_from( + restore_path=model_to_load_path.as_posix(), + trainer=trainer, + override_config_path=override_config, + save_restore_connector=save_restore_connector, + map_location=f"cuda:{trainer.local_rank}", + ) + + model.freeze() + model.training = False + try: + # Have to turn off activations_checkpoint_method for inference + model.model.language_model.encoder.activations_checkpoint_method = None + except AttributeError: + pass + return model + + +def setup_distributed_environment(trainer): + try: + from nemo.utils.app_state import AppState + except ModuleNotFoundError: + raise Exception( + "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed" + "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, " + "or installing nemo following https://github.com/NVIDIA/NeMo.", + ) + + def dummy(): + return + + if trainer.strategy.launcher is not None: + trainer.strategy.launcher.launch(dummy, trainer=trainer) + trainer.strategy.setup_environment() + + app_state = AppState() + + return app_state + + +@register_model("nemo_lm") +class NeMoLM(LM): + def __init__( + self, + path: str, + max_length: int = 4096, + batch_size: int = 1, + max_gen_toks: int = 256, + devices: int = 1, + num_nodes: int = 1, + tensor_model_parallel_size: int = 1, + pipeline_model_parallel_size: int = 1, + precision: Literal[ + "16-mixed", + "bf16-mixed", + "32-true", + "64-true", + 64, + 32, + 16, + "64", + "32", + "16", + "bf16", + ] = "bf16", + **kwargs, + ): + try: + from nemo.collections.nlp.modules.common.text_generation_utils import ( + generate, + ) + from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy + from pytorch_lightning.trainer.trainer import Trainer + + self.generate = generate + except ModuleNotFoundError: + raise Exception( + "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed" + "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, " + "or installing nemo following https://github.com/NVIDIA/NeMo.", + ) + + super().__init__() + + if ( + tensor_model_parallel_size == 1 + and pipeline_model_parallel_size == 1 + and devices > 1 + ): + eval_logger.info( + f"The number of data replicas for evaluation is {devices}." + ) + eval_logger.info(f"The total number of devices is {devices}.") + eval_logger.info( + "No tensor parallelism or pipeline parallelism is applied." + ) + + elif tensor_model_parallel_size * pipeline_model_parallel_size == devices: + eval_logger.info( + f"Setting tensor parallelism to {tensor_model_parallel_size} and pipeline parallelism to {pipeline_model_parallel_size}." + ) + eval_logger.info(f"The total number of devices is {devices}.") + eval_logger.info("No data parallelism is applied.") + + else: + raise ValueError( + "Please set the product of tensor_model_parallel_size and pipeline_model_parallel_size" + "equal to the specified number of devices." + ) + + if num_nodes > 1: + raise ValueError( + "A number of nodes greater than 1 is not supported yet. Please set num_nodes as 1." 
+ ) + + trainer = Trainer( + strategy=NLPDDPStrategy(), + devices=devices, + accelerator="gpu", + num_nodes=num_nodes, + precision=precision, + logger=False, + enable_checkpointing=False, + use_distributed_sampler=False, + ) + # Modify the following flags only for data replication + if ( + tensor_model_parallel_size == 1 + and pipeline_model_parallel_size == 1 + and devices > 1 + ): + self._device = torch.device(f"cuda:{trainer.global_rank}") + self._rank = trainer.global_rank + self._world_size = trainer.world_size + self.model = load_model( + path, + trainer, + tensor_model_parallel_size=tensor_model_parallel_size, + pipeline_model_parallel_size=pipeline_model_parallel_size, + ).cuda() + self.tokenizer = self.model.tokenizer + self.app_state = setup_distributed_environment(trainer) + + self._max_length = max_length + self._batch_size = int(batch_size) + self._max_gen_toks = max_gen_toks + + @classmethod + def create_from_arg_string(cls, arg_string, additional_config=None): + args = simple_parse_args_string(arg_string) + if additional_config: + args["batch_size"] = additional_config.get("batch_size", 1) + + return cls(**args) + + @property + def eot_token_id(self): + try: + return self.tokenizer.eos_id + except AttributeError: + return None + + @property + def max_length(self): + return self._max_length + + @property + def max_gen_toks(self): + return self._max_gen_toks + + @property + def batch_size(self): + return self._batch_size + + @property + def device(self): + return self._device + + @property + def rank(self): + return self._rank + + @property + def world_size(self): + return self._world_size + + @property + def accelerator(self): + return self._Accelerator(self.world_size) + + class _Accelerator: + def __init__(self, world_size): + self.world_size = world_size + + def wait_for_everyone(self): + torch.distributed.barrier() + + def gather(self, local_tensor): + gathered_tensors = [ + torch.zeros(1, dtype=local_tensor.dtype).cuda() + for _ in range(self.world_size) + ] + torch.distributed.all_gather(gathered_tensors, local_tensor) + return torch.cat(gathered_tensors) + + def tok_encode(self, string: str): + return self.tokenizer.text_to_ids(string) + + def tok_decode(self, tokens): + return self.tokenizer.ids_to_text(tokens) + + def _encode_pair(self, context, continuation): + n_spaces = len(context) - len(context.rstrip()) + if n_spaces > 0: + continuation = context[-n_spaces:] + continuation + context = context[:-n_spaces] + whole_enc = self.tok_encode(context + continuation) + context_enc = self.tok_encode(context) + context_enc_len = len(context_enc) + continuation_enc = whole_enc[context_enc_len:] + return context_enc, continuation_enc + + def loglikelihood(self, requests): + new_reqs = [] + for context, continuation in [req.args for req in requests]: + if context == "": + # end of text as context + context_enc, continuation_enc = ( + [self.eot_token_id], + self.tok_encode(continuation), + ) + else: + context_enc, continuation_enc = self._encode_pair(context, continuation) + + new_reqs.append(((context, continuation), context_enc, continuation_enc)) + + return self._loglikelihood_tokens(new_reqs) + + def loglikelihood_rolling( + self, requests: List[Instance], disable_tqdm: bool = False + ) -> List[float]: + loglikelihoods = [] + + for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm): + rolling_token_windows = list( + map( + make_disjoint_window, + get_rolling_token_windows( + token_list=self.tok_encode(string), + prefix_token=self.eot_token_id, + 
max_seq_len=self.max_length - 1, + context_len=1, + ), + ) + ) + + rolling_token_windows = [(None,) + x for x in rolling_token_windows] + + string_nll = self._loglikelihood_tokens( + rolling_token_windows, + ) + + # discard is_greedy + string_nll = [x[0] for x in string_nll] + + string_nll = sum(string_nll) + loglikelihoods.append(string_nll) + return loglikelihoods + + def _loglikelihood_tokens(self, requests, disable_tqdm=False): + res = [] + + def _collate(x): + toks = x[1] + x[2] + return -len(toks), tuple(toks) + + re_ord = Collator(requests, sort_fn=_collate) + chunks = re_ord.get_batched(n=self.batch_size, batch_fn=None) + pbar = tqdm( + total=len(requests), + disable=(disable_tqdm or (self.rank != 0)), + desc="Running loglikelihood requests", + ) + for chunk in chunks: + inps = [] + ctxlens = [] + contlens = [] + + for _, context_enc, continuation_enc in chunk: + # Leave one token for generation. Tokens_to_generate = 0 breaks NeMo. + inp = (context_enc + continuation_enc)[-(self.max_length - 1) :] + + ctxlen = len(context_enc) - max( + 0, len(context_enc) + len(continuation_enc) - (self.max_length - 1) + ) + ctxlens.append(ctxlen) + contlens.append(len(continuation_enc)) + + inps.append(self.tok_decode(inp)) + + output = self.generate( + self.model, + inputs=inps, + tokens_to_generate=1, + min_tokens_to_generate=1, + compute_logprob=True, + all_probs=True, + ) + + batch_token_ids = np.asarray(output["token_ids"])[:, :-1] + batch_logprobs = output["logprob"][:, :-1] + batch_full_logprob = output["full_logprob"][:, :-1, :] + + # Compute greedy tokens for entire batch rather than calling it with proper ctxlen for each sample. + # Additional tokens for each sample will be trimmed later. + min_ctxlen = min(ctxlens) + + # Use min_ctxlen-1 instead of min_ctxlen since full_logprobs are not returns for the first token. + batch_greedy_tokens = ( + torch.argmax(batch_full_logprob[:, min_ctxlen - 1 :, :], -1) + .cpu() + .numpy() + ) + + for token_ids, greedy_tokens, logprobs, ctxlen, contlen, ( + cache_key, + _, + _, + ) in zip( + batch_token_ids, + batch_greedy_tokens, + batch_logprobs, + ctxlens, + contlens, + chunk, + ): + # Trim at contlen since shorter contexts in a batch will have more than one token generated. 
+ # Use ctxlen-1 instead of ctxlen same as for full_logprob in batch_greedy_tokens calculation + logprobs = (logprobs[ctxlen - 1 :])[:contlen] + logprob = sum(logprobs).tolist() + + continuation_tokens = (token_ids[ctxlen:])[:contlen] + len_diff = ctxlen - min_ctxlen + is_greedy = continuation_tokens == (greedy_tokens[len_diff:])[:contlen] + if not isinstance(is_greedy, bool): + is_greedy = is_greedy.all() + answer = (logprob, is_greedy) + + if cache_key is not None: + self.cache_hook.add_partial("loglikelihood", cache_key, answer) + + res.append(answer) + pbar.update(1) + + pbar.close() + + return re_ord.get_original(res) + + def generate_until(self, requests): + if not requests: + return [] + res = [] + + def get_until(req_args): + until = req_args.get("until", []) + until = deepcopy(until) # prevent from modifying req_args for cache_key + if self.tokenizer.ids_to_tokens([self.eot_token_id])[0] not in until: + until.append(self.tokenizer.ids_to_tokens([self.eot_token_id])[0]) + return until + + def _collate(x): + toks = self.tok_encode(x[0]) + return len(toks), x[0] + + re_ords = Collator( + [reg.args for reg in requests], sort_fn=_collate, group_by="gen_kwargs" + ) + chunks = re_ords.get_batched(n=self.batch_size, batch_fn=None) + for chunk in chunks: + contexts, all_gen_kwargs = zip(*chunk) + # we assume all gen kwargs in the batch are the same + # this is safe to assume because the `grouper` object ensures it. + req_args = all_gen_kwargs[0] + # unpack our keyword arguments. + until = get_until(req_args) + max_gen_toks = req_args.get("max_gen_toks", self.max_gen_toks) + + remaining_length = self.max_length - max_gen_toks + contexts = [] + for context, _ in chunk: + encoded_context = self.tok_encode(context) + encoded_context = encoded_context[-remaining_length:] + contexts.append(self.tok_decode(encoded_context)) + + output = self.generate( + self.model, + inputs=contexts, + tokens_to_generate=max_gen_toks, + end_strings=until, + greedy=True, + ) + + answers = output["sentences"] + + continuations = [] + for context, answer in zip(contexts, answers): + continuations.append(answer[len(context) :]) + + for term in until: + continuations = [answer.split(term)[0] for answer in continuations] + + for request, answer in zip(chunk, continuations): + self.cache_hook.add_partial("greedy_until", request, answer) + res.append(answer) + + return re_ords.get_original(res) diff --git a/lm-evaluation-harness/lm_eval/models/neuralmagic.py b/lm-evaluation-harness/lm_eval/models/neuralmagic.py new file mode 100644 index 0000000000000000000000000000000000000000..7c16b06d50b2b8117cf0b6d6b33d9d4a2b681923 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/models/neuralmagic.py @@ -0,0 +1,426 @@ +import copy +from typing import List, Optional, Tuple, Union + +import numpy +import transformers +from tqdm import tqdm + +import lm_eval.models.utils +from lm_eval import utils +from lm_eval.api.instance import Instance +from lm_eval.api.model import LM +from lm_eval.api.registry import register_model +from lm_eval.models.huggingface import HFLM + + +eval_logger = utils.eval_logger + + +@register_model("sparseml") +class SparseMLLM(HFLM): + """ + SparseML is an open-source model optimization toolkit that enables you to create + inference-optimized sparse models using pruning, quantization, and distillation + algorithms. Models optimized with SparseML can then be exported to the ONNX format and + deployed with DeepSparse for GPU-class performance on CPU hardware. 
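+
+    A model optimized this way can then be evaluated here through the `sparseml`
+    model type, for example (a sketch only; the checkpoint name and task are
+    placeholders, and any SparseML-compatible checkpoint or local export should work):
+
+        lm_eval --model sparseml \
+            --model_args pretrained=<hub-id-or-local-path>,dtype=bfloat16 \
+            --tasks wikitext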
+ + This class is a wrapper around the HuggingFace LM class to enable SparseML + integration with the lm-evaluation-harness. + """ + + def _create_model( + self, + pretrained: str, + revision: Optional[str] = "main", + dtype: Optional[str] = "auto", + trust_remote_code: Optional[bool] = False, + **kwargs, + ) -> None: + try: + from sparseml.transformers import SparseAutoModelForCausalLM + except ModuleNotFoundError: + raise Exception( + "Package `sparseml` is not installed. " + "Please install it via `pip install sparseml[transformers]`" + ) + + model_kwargs = kwargs if kwargs else {} + + if "device_map" not in model_kwargs: + # set a device_map to initialize model on the right GPU. + # this is needed because it seems that the default behavior + # for quantized models now seems to be device_map="auto" + # which breaks data-parallel mode. + if hasattr(self, "accelerator"): + model_kwargs.update( + {"device_map": {"": f"cuda:{self.accelerator.local_process_index}"}} + ) + else: + model_kwargs.update({"device_map": {"": str(self.device)}}) + + relevant_kwarg_names = [ + "offload_folder", + "device_map", + ] + relevant_kwargs = { + k: v for k, v in model_kwargs.items() if k in relevant_kwarg_names + } + + # Log the difference between model_kwargs and relevant_kwargs so we can see + # what is being ignored + ignored_kwargs = {} + for k, v in model_kwargs.items(): + if k not in relevant_kwargs.keys(): + ignored_kwargs[k] = v + eval_logger.warning( + f"The sparseml integration is ignoring the following kwargs that are specified: {ignored_kwargs}" + ) + + model = SparseAutoModelForCausalLM.from_pretrained( + pretrained, + revision=revision, + torch_dtype=lm_eval.models.utils.get_dtype(dtype), + trust_remote_code=trust_remote_code, + **relevant_kwargs, + ) + self._model = model + + def _get_config(self, pretrained: str, **kwargs) -> None: + try: + from sparseml.transformers import SparseAutoConfig + except ModuleNotFoundError: + raise Exception( + "Package `sparseml` is not installed. " + "Please install it via `pip install sparseml[transformers]`" + ) + + self._config = SparseAutoConfig.from_pretrained( + pretrained_model_name_or_path=pretrained, **kwargs + ) + + def _create_tokenizer( + self, + pretrained: Union[str, transformers.PreTrainedModel], + tokenizer: Optional[ + Union[ + str, + transformers.PreTrainedTokenizer, + transformers.PreTrainedTokenizerFast, + ] + ], + **kwargs, + ) -> None: + try: + from sparseml.transformers import SparseAutoTokenizer + except ModuleNotFoundError: + raise Exception( + "Package `sparseml` is not installed. " + "Please install it via `pip install sparseml[transformers]`" + ) + + if tokenizer: + if isinstance(tokenizer, str): + self.tokenizer = SparseAutoTokenizer.from_pretrained( + tokenizer, + **kwargs, + ) + else: + assert isinstance( + tokenizer, transformers.PreTrainedTokenizer + ) or isinstance(tokenizer, transformers.PreTrainedTokenizerFast) + self.tokenizer = tokenizer + else: + # Get tokenizer based on 'pretrained' + if isinstance(pretrained, str): + model_name = pretrained + else: + # get the HF hub name via accessor on model + model_name = self.model.name_or_path + self.tokenizer = SparseAutoTokenizer.from_pretrained( + model_name, + **kwargs, + ) + return None + + +@register_model("deepsparse") +class DeepSparseLM(LM): + """ + Wrapper around DeepSparse, a sparsity-aware deep learning + inference runtime for CPUs, to make it compatible with the + lm-evaluation-harness. 
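+
+    Example invocation (a sketch; the model path is a placeholder for any
+    DeepSparse-exported text-generation model or SparseZoo stub):
+
+        lm_eval --model deepsparse \
+            --model_args pretrained=<path-or-zoo-stub>,batch_size=8 \
+            --tasks lambada_openai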
+ """ + + _DEFAULT_MAX_LENGTH = 2048 + + def __init__( + self, + pretrained: str, + tokenizer: Optional[ + Union[ + str, + transformers.PreTrainedTokenizer, + transformers.PreTrainedTokenizerFast, + ] + ] = None, + batch_size: Optional[Union[int, str]] = 1, + max_gen_toks: Optional[int] = 256, + max_length: Optional[int] = None, + ): + super().__init__() + + try: + import deepsparse + except ModuleNotFoundError: + raise Exception( + "Package `deepsparse` is not installed. " + "Please install it via `pip install deepsparse[transformers]`" + ) + + if isinstance(batch_size, str) and not batch_size.isdigit(): + eval_logger.warning( + f"batch_size={batch_size} is not valid for deepsparse because it is not an integer. " + "Ignoring and using the default of 1." + ) + batch_size = 1 + + self.batch_size = int(batch_size) + self._max_length = max_length if max_length else self._DEFAULT_MAX_LENGTH + self._max_gen_toks = max_gen_toks + self.batch_sizes = {} + + # Initialize new model and tokenizer instances + self.model = deepsparse.TextGeneration( + model_path=pretrained, + sequence_length=self._max_length, + batch_size=batch_size, + ) + self.tokenizer = tokenizer if tokenizer else self.model.tokenizer + self.config = self.model.config + + def tok_encode(self, string: str) -> List[int]: + return self.tokenizer.encode(string) + + def tok_decode(self, tokens: List[int]) -> str: + return self.tokenizer.decode(tokens) + + @property + def eot_token_id(self): + # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence* + return self.tokenizer.eos_token_id + + @property + def prefix_token_id(self): + # it is used as prefix for loglikelihood + if self.tokenizer.bos_token_id is not None: + return self.tokenizer.bos_token_id + return self.tokenizer.eos_token_id + + @property + def max_length(self) -> int: + return self._max_length + + @property + def max_gen_toks(self) -> int: + return self._max_gen_toks + + def loglikelihood(self, requests) -> List[Tuple[float, bool]]: + """ + Copied directly from + https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/models/huggingface.py + """ + new_reqs = [] + for context, continuation in [req.args for req in requests]: + if context == "": + raise NotImplementedError( + "Implementing empty context is not supported yet" + ) + context_enc, continuation_enc = self._encode_pair(context, continuation) + + new_reqs.append(((context, continuation), context_enc, continuation_enc)) + + return self._loglikelihood_tokens(new_reqs) + + def _loglikelihood_tokens( + self, + requests: List[Tuple[Tuple[str, str], List[int], List[int]]], + disable_tqdm: bool = False, + ) -> List[Tuple[float, bool]]: + """ + The function to compute the loglikelihood of the continuation + tokens given the context tokens. 
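+        Returns one (log-probability, is-exact-greedy-match) tuple per request,
+        restored to the original request order.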
+ + This function is an adapted version of the original function from + https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/models/huggingface.py + """ + res = [] + + def _collate(x): + """Defines the key for the sorted method""" + toks = x[1] + x[2] + return -len(toks), tuple(toks) + + re_ord = utils.Reorderer(requests, _collate) + + for chunk in tqdm( + list(lm_eval.models.utils.chunks(re_ord.get_reordered(), self.batch_size)), + disable=disable_tqdm, + ): + batch_inp = [] + batch_cache_key = [] + batch_continuation_enc = [] + # len(chunk) is the batch_size + for cache_key, context_enc, continuation_enc in chunk: + # how this all works (illustrated on a causal decoder-only setup): + # CTX CONT + # inp 0 1 2 3|4 5 6 7 8 9 <- last token is deleted by inp[:, :-1] + # model \ \ + # logits 1 2 3|4 5 6 7 8 9 <- the ctx half gets tossed out by the + # cont_toks 4 5 6 7 8 9 [:, -len(continuation_enc):, :self.vocab_size] slice # noqa: E501 + + inp = (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1] + + batch_inp.append(self.tokenizer.decode(inp)) + batch_cache_key.append(cache_key) + batch_continuation_enc.append(continuation_enc) + + response = self.model( + prompt=batch_inp, + max_new_tokens=0, + output_scores=True, + include_prompt_logits=True, + ) + + for resp, continuation_enc, cache_key in zip( + response.generations, batch_continuation_enc, batch_cache_key + ): + # (seq_len, vocab_size) + multi_scores = resp.score + + from deepsparse.utils.data import numpy_log_softmax + + # (seq_len, vocab_size) but with softmax applied + multi_logits = numpy_log_softmax(multi_scores, axis=1) + # toss out the context half of the sequence + # (cont_len, vocab_size) + continuation_multi_logits = multi_logits[-len(continuation_enc) :] + + # pick out the logits for the continuation tokens + # (cont_len,) + continuation_logits = continuation_multi_logits[ + numpy.arange(len(continuation_enc)), continuation_enc + ] + # check if the tokens generated greedly are the same + # as the expected continuation + greedy_tokens = continuation_multi_logits.argmax(axis=1) + max_equal = greedy_tokens.tolist() == continuation_enc + + # Answer: (log prob, is-exact-match) + answer = (float(continuation_logits.sum()), bool(max_equal)) + + res.append(answer) + + if cache_key is not None: + self.cache_hook.add_partial("loglikelihood", cache_key, answer) + + return re_ord.get_original(res) + + def loglikelihood_rolling(self, requests: List[Instance]) -> List[float]: + raise NotImplementedError( + "The method not required by any of our current task integrations so far" + ) + + def generate_until(self, requests: List[Instance]) -> List[str]: + """ + The function to generate a certain number of new tokens + given a context. 
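+        Requests are chunked so that each batch shares the same generation arguments
+        (in particular the same `until` stop sequences), and results are returned in
+        the original request order.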
+ + This function is an adapted version of the original function from + https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/models/openai_completions.py + """ + if not requests: + return [] + res = [] + requests = [req.args for req in requests] + + def _collate(x): + toks = self.tok_encode(x[0]) + return len(toks), x[0] + + re_ord = utils.Reorderer(requests, _collate) + + def sameuntil_chunks(xs, size): + ret = [] + lastuntil = xs[0][1] + for x in xs: + if len(ret) >= size or x[1] != lastuntil: + yield ret, lastuntil + ret = [] + lastuntil = x[1] + ret.append(x) + + if ret: + yield ret, lastuntil + + pbar = tqdm(total=len(requests)) + for chunk, request_args in tqdm( + list(sameuntil_chunks(re_ord.get_reordered(), self.batch_size)) + ): + inps = [] + + # make a deepcopy since we are changing arguments + request_args = copy.deepcopy(request_args) + + self._max_gen_toks = request_args.pop("max_gen_toks", self.max_gen_toks) + + for context, _ in chunk: + # add context (prompts) to the list + inps.append(context) + + until = request_args.pop("until", ["<|endoftext|>"]) + request_args.pop("do_sample", None) + request_args["temperature"] = request_args.get("temperature", 0) + + # run inference (generate max_gen_toks tokens) + out = self.model( + sequences=inps, + max_new_tokens=self.max_gen_toks - 1, + stop=until, + **request_args, + ) + + for resp, (context, args_) in zip(out.generations, chunk): + text = resp.text + until_ = until + # split the text at the first occurrence of any of the until tokens + for term in until_: + if len(term) > 0: + text = text.split(term)[0] + + res.append(text) + + self.cache_hook.add_partial( + "generate_until", (context, {"until": until_}), text + ) + pbar.update(1) + + pbar.close() + + return re_ord.get_original(res) + + def _encode_pair( + self, context: str, continuation: str + ) -> Tuple[List[int], List[int]]: + """ + Copied directly from + https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/models/huggingface.py + """ + n_spaces = len(context) - len(context.rstrip()) + if n_spaces > 0: + continuation = context[-n_spaces:] + continuation + context = context[:-n_spaces] + whole_enc = self.tok_encode(context + continuation) + context_enc = self.tok_encode(context) + context_enc_len = len(context_enc) + continuation_enc = whole_enc[context_enc_len:] + return context_enc, continuation_enc diff --git a/lm-evaluation-harness/lm_eval/models/neuron_optimum.py b/lm-evaluation-harness/lm_eval/models/neuron_optimum.py new file mode 100644 index 0000000000000000000000000000000000000000..9100870476d6f410096a0497470d19796e2d91e5 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/models/neuron_optimum.py @@ -0,0 +1,736 @@ +import copy +import json +import logging +import subprocess +from collections import defaultdict +from typing import List, Optional, Union + +import torch +import torch.nn.functional as F +import transformers +from packaging import version +from tqdm import tqdm +from transformers import GenerationConfig +from transformers.generation import StoppingCriteriaList + +import lm_eval.models.utils +from lm_eval import utils +from lm_eval.api.model import TemplateLM +from lm_eval.api.registry import register_model +from lm_eval.models.utils import stop_sequences_criteria + + +try: + NEURON_AVAILABLE = True + from optimum.neuron import NeuronModelForCausalLM + from optimum.neuron.generation import TokenSelector + from optimum.neuron.version import __version__ as optimum_neuron_version +except ImportError: + 
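+    # fall back to a plain `object` base class so this module can still be imported
+    # (and the wrapper class below still defined) when optimum-neuron is not installed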
NeuronModelForCausalLM = object + NEURON_AVAILABLE = False + + +logger = logging.getLogger(__name__) + + +def get_nc_count() -> Union[int, None]: + """Returns the number of neuron cores on the current instance.""" + try: + cmd = "neuron-ls --json-output" + result = subprocess.run(cmd, shell=True, capture_output=True) + print(f"inferring nc_count from `neuron-ls` {result.stdout}") + json_output = json.loads(result.stdout) + count = sum([x["nc_count"] for x in json_output]) + print(f"nc_count={count}") + return count + except Exception: + return None + + +def wrap_constant_batch_size(func): + def _decorator(self, input_ids): + """input_ids a 2D array with batch_size on dim=0 + + makes sure the func runs with self.batch_size + """ + # access a from TestSample + batch_size = input_ids.shape[0] + + if batch_size < self.batch_size: + # handle the event of input_ids.shape[0] != batch_size + # Neuron cores expect constant batch_size + input_ids = torch.concat( + ( + input_ids, + # add missing_batch_size dummy + torch.zeros( + [self.batch_size - batch_size, *input_ids.size()[1:]], + dtype=input_ids.dtype, + device=input_ids.device, + ), + ), + dim=0, + ) + elif batch_size > self.batch_size: + raise ValueError( + f"The specified batch_size ({batch_size}) exceeds the model static batch size ({self.batch_size})" + ) + # return the forward pass that requires constant batch size + return func(self, input_ids)[:batch_size] + + return _decorator + + +class CustomNeuronModelForCausalLM(NeuronModelForCausalLM): + """NeuronModelForCausalLM with `stopping_criteria` in `generate`""" + + def generate( + self, + input_ids: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + stopping_criteria: Optional["StoppingCriteriaList"] = None, + generation_config: Optional["GenerationConfig"] = None, + **kwargs, + ) -> torch.LongTensor: + r""" + A streamlined generate() method overriding the transformers.GenerationMixin.generate() method. + + This method uses the same logits processors/warpers and stopping criteria as the transformers library + `generate()` method but restricts the generation to greedy search and sampling. + + It does not support transformers `generate()` advanced options. + + Please refer to https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.GenerationMixin.generate + for details on generation configuration. + + Parameters: + input_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. + generation_config (`~transformers.generation.GenerationConfig`, *optional*): + The generation configuration to be used as base parametrization for the generation call. `**kwargs` + passed to generate matching the attributes of `generation_config` will override them. If + `generation_config` is not provided, default will be used, which had the following loading + priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model + configuration. Please note that unspecified parameters will inherit [`~transformers.generation.GenerationConfig`]'s + default values, whose documentation should be checked to parameterize generation. + + Returns: + `torch.Tensor`: A `torch.FloatTensor`. 
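+                (in practice the generated token ids; inputs are padded up to the
+                compiled static batch size internally and the output is sliced back
+                down to the caller's batch size)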
+ """ + # The actual generation configuration is a combination of config and parameters + generation_config = copy.deepcopy( + self.generation_config if generation_config is None else generation_config + ) + model_kwargs = generation_config.update( + **kwargs + ) # All unused kwargs must be model kwargs + # Check model kwargs are actually used by either prepare_inputs_for_generation or forward + self._validate_model_kwargs(model_kwargs) + + # Instantiate a TokenSelector for the specified configuration + selector = TokenSelector.create( + input_ids, generation_config, self, self.max_length + ) + selector.stopping_criteria.append(stopping_criteria) + # Verify that the inputs are compatible with the model static input dimensions + batch_size, sequence_length = input_ids.shape + if sequence_length > self.max_length: + raise ValueError( + f"The input sequence length ({sequence_length}) exceeds the model static sequence length ({self.max_length})" + ) + padded_input_ids = input_ids + padded_attention_mask = attention_mask + if batch_size > self.batch_size: + raise ValueError( + f"The specified batch_size ({batch_size}) exceeds the model static batch size ({self.batch_size})" + ) + elif batch_size < self.batch_size: + logger.warning( + "Inputs will be padded to match the model static batch size. This will increase latency." + ) + padding_shape = [self.batch_size - batch_size, sequence_length] + padding = torch.full( + padding_shape, fill_value=self.config.eos_token_id, dtype=torch.int64 + ) + padded_input_ids = torch.cat([input_ids, padding]) + if attention_mask is not None: + padding = torch.zeros(padding_shape, dtype=torch.int64) + padded_attention_mask = torch.cat([attention_mask, padding]) + # Drop the current generation context and clear the Key/Value cache + self.reset_generation() + + output_ids = self.generate_tokens( + padded_input_ids, + selector, + batch_size, + attention_mask=padded_attention_mask, + **model_kwargs, + ) + return output_ids[:batch_size, :] + + +@register_model("neuronx") +class NEURON_HF(TemplateLM): + """ + Enables usage with on AWS Neuron + using the HuggingFace Transformers + Transformers neuronx library. 
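+    Example invocation (a sketch; `tp_degree` should match the number of Neuron
+    cores on the instance, and any checkpoint exportable by optimum-neuron can be used):
+
+        lm_eval --model neuronx \
+            --model_args pretrained=TinyLlama/TinyLlama-1.1B-Chat-v1.0,tp_degree=2,dtype=bfloat16 \
+            --tasks lambada_openai
+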
+ Tested with neuron 2.17.0 + """ + + _DEFAULT_MAX_LENGTH = 2048 + + def __init__( + self, + pretrained: Optional[str] = "TinyLlama/TinyLlama-1.1B-Chat-v1.0", + revision: Optional[str] = "main", + tp_degree: Optional[int] = None, + subfolder: Optional[str] = None, + tokenizer: Optional[str] = None, + truncation: Optional[bool] = False, + max_length: Optional[int] = None, + dtype: Optional[Union[str, torch.dtype]] = "auto", + batch_size: Optional[int] = 1, + low_cpu_mem_usage: Optional[bool] = True, + trust_remote_code: Optional[bool] = False, + use_fast_tokenizer: Optional[bool] = True, + add_bos_token: Optional[bool] = False, + ) -> None: + if not NEURON_AVAILABLE: + raise Exception( + "Tried to load neuron model, but neuron is not installed ", + "please install neuron via pip install transformers-neuron ", + "also make sure you are running on an AWS inf2 instance", + ) + if version.parse(optimum_neuron_version) != version.parse("0.0.17"): + logger.warning( + '`optimum-neuron` model requires `pip install "optimum[neuronx]>=0.0.17" ' + "preferably using the Hugging Face Neuron Deep Learning AMI (Ubuntu 22.04) " + "https://aws.amazon.com/marketplace/pp/prodview-gr3e6yiscria2 " + f"You are using optimum-neuron={optimum_neuron_version}" + ) + super().__init__() + + assert isinstance(pretrained, str) + assert isinstance(batch_size, (int, str)) + + self.batch_size_per_gpu = int(batch_size) + batch_size = int(batch_size) + if tp_degree is None: + # execute `neuron-ls --json-output | jq '.[0].nc_count'`` + # to get the number of neuron cores on your instance + tp_degree = get_nc_count() + + assert isinstance(tp_degree, int), ( + f"model_args must include tp_degree. tp_degree must be set to an integer," + f" but is tp_degree=`{tp_degree}` with type=`{type(tp_degree)}`." + "Set it to number of neuron cores on your instance." + " For inf2.xlarge and inf2.8xlarge, set it to `2`." + " For inf2.24xlarge, set it to `12`." + " For inf2.48xlarge, set it to `24`." + ) + + # TODO: update this to be less of a hack once subfolder is fixed in HF + revision = revision + ("/" + subfolder if subfolder is not None else "") + + self._config = transformers.AutoConfig.from_pretrained( + pretrained, + revision=revision, + trust_remote_code=trust_remote_code, + ) + torch_dtype = lm_eval.models.utils.get_dtype(dtype) + + assert torch_dtype in [ + torch.float16, + torch.bfloat16, + ], "Only float16 and bfloat16 are supported" + + self.tokenizer = transformers.AutoTokenizer.from_pretrained( + pretrained if tokenizer is None else tokenizer, + revision=revision, + trust_remote_code=trust_remote_code, + use_fast=use_fast_tokenizer, + ) + + # Neuron specific code + if torch_dtype == torch.float16: + self.amp_dtype = "f16" + elif torch_dtype == torch.bfloat16: + self.amp_dtype = "bf16" + elif torch_dtype == torch.float32: + self.amp_dtype = "f32" + else: + raise NotImplementedError("Only float16 and bfloat16 are implemented.") + + compiler_args = {"num_cores": tp_degree, "auto_cast_type": self.amp_dtype} + input_shapes = { + "batch_size": batch_size, + "sequence_length": self._DEFAULT_MAX_LENGTH, + } + + print( + f"{'='*20} \n loading model to neuron with" + f" {compiler_args}, {input_shapes}..." + ) + self.model = CustomNeuronModelForCausalLM.from_pretrained( + pretrained, + revision=revision, + trust_remote_code=trust_remote_code, + low_cpu_mem_usage=low_cpu_mem_usage, + export=True, + **compiler_args, + **input_shapes, + ) + print(f"SUCCESS: neuron model compiled. 
\n {'='*20}") + + self.truncation = truncation + + self.vocab_size = self.tokenizer.vocab_size + self.tokenizer.pad_token_id = self.tokenizer.eos_token_id + self.add_bos_token = self.add_bos_token + + self._max_length = max_length + + self.batch_schedule = 1 + self.batch_sizes = {} + + @property + def config(self): + # return the associated transformers.AutoConfig for the given pretrained model. + return self._config + + @property + def eot_token_id(self): + # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence* + return self.tokenizer.eos_token_id + + @property + def prefix_token_id(self): + # it is used as prefix for loglikelihood + return self.tokenizer.bos_token_id or self.tokenizer.eos_token_id + + @property + def max_length(self): + if self._max_length: # if max length manually set, return it + return self._max_length + seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx") + for attr in seqlen_config_attrs: + if hasattr(self.model.config, attr): + return getattr(self.model.config, attr) + if hasattr(self.tokenizer, "model_max_length"): + if self.tokenizer.model_max_length == 1000000000000000019884624838656: + return self._DEFAULT_MAX_LENGTH + return self.tokenizer.model_max_length + return self._DEFAULT_MAX_LENGTH + + @property + def max_gen_toks(self) -> int: + return 256 + + @property + def batch_size(self): + return self.batch_size_per_gpu + + @property + def device(self): + """device are neuron cores, but the created tensors are on CPU.""" + return "cpu" + + @property + def rank(self): + return 0 + + @property + def world_size(self): + return 1 + + def tok_encode(self, string: str, left_truncate_len=None, add_special_tokens=None): + """ """ + if add_special_tokens is None: + add_special_tokens = False or self.add_bos_token + + encoding = self.tokenizer.encode(string, add_special_tokens=add_special_tokens) + + # left-truncate the encoded context to be at most `left_truncate_len` tokens long + if left_truncate_len: + encoding = encoding[-left_truncate_len:] + + return encoding + + def tok_batch_encode( + self, + strings: List[str], + padding_side: str = "left", + left_truncate_len: int = None, + truncation: bool = False, + ): + # encode a batch of strings. converts to tensors and pads automatically, unlike tok_encode. 
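+        # left padding (the default here) keeps each prompt flush against the start of
+        # generation, so the continuation follows real tokens rather than pad tokens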
+ old_padding_side = self.tokenizer.padding_side + self.tokenizer.padding_side = padding_side + + add_special_tokens = False or self.add_bos_token + + encoding = self.tokenizer( + strings, + truncation=truncation, + padding="longest", + return_tensors="pt", + add_special_tokens=add_special_tokens, + ) + if left_truncate_len: + encoding["input_ids"] = encoding["input_ids"][:, -left_truncate_len:] + encoding["attention_mask"] = encoding["attention_mask"][ + :, -left_truncate_len: + ] + self.tokenizer.padding_side = old_padding_side + + return encoding["input_ids"], encoding["attention_mask"] + + def tok_decode(self, tokens): + return self.tokenizer.decode(tokens) + + @wrap_constant_batch_size + def _model_call(self, input_ids: torch.Tensor): + """ + get logits for the entire sequence + + :param input_ids: torch.Tensor + A torch tensor of shape [batch, sequence_cont] + the size of sequence may vary from call to call + :return + A torch tensor of shape [batch, sequence, vocab] with the + logits returned from the model's decoder-lm head + """ + _, sequence_length = input_ids.shape + + with torch.inference_mode(): + cache_ids = torch.arange(0, sequence_length, dtype=torch.int32).split(1) + input_ids_split = input_ids.split(1, dim=1) + + return torch.concat( + [ + self.model.forward( + input_ids=input_id, cache_ids=cache_id, return_dict=False + )[0] + for input_id, cache_id in zip(input_ids_split, cache_ids) + ], + dim=1, + ) + + def _model_generate(self, context, max_length, stop, **generation_kwargs): + # we require users to pass do_sample=True explicitly + # for non-greedy gen. This should be reevaluated when considering beam search. + + with torch.inference_mode(): + if "do_sample" not in generation_kwargs.keys(): + generation_kwargs["do_sample"] = False + + stopping_criteria = stop_sequences_criteria( + self.tokenizer, + stop + [self.tokenizer.decode([self.config.eos_token_id])], + 1, + context.shape[0], + ) + + return self.model.generate( + input_ids=context, + max_length=max_length, + stopping_criteria=stopping_criteria, + pad_token_id=self.eot_token_id, + use_cache=True, + **generation_kwargs, + ) + + def _select_cont_toks(self, logits, contlen=None, inplen=None): + assert ( + contlen and inplen + ), "Must pass input len and cont. len to select scored logits for causal LM" + # discard right-padding. + # also discard the input/context tokens. we'll only score continuations. 
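+        # i.e. keep exactly the `contlen` positions whose logits score the continuation
+        # tokens: the half-open range [inplen - contlen, inplen)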
+ logits = logits[inplen - contlen : inplen] + + return logits + + def loglikelihood_rolling(self, requests, disable_tqdm: bool = False): + loglikelihoods = [] + + adaptive_batch_size = None + + for (string,) in tqdm( + [req.args for req in requests], disable=(disable_tqdm or (self.rank != 0)) + ): + rolling_token_windows = list( + map( + utils.make_disjoint_window, + utils.get_rolling_token_windows( + token_list=self.tok_encode(string), + prefix_token=self.prefix_token_id, + max_seq_len=self.max_length, + context_len=1, + ), + ) + ) + + # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case + rolling_token_windows = [(None,) + x for x in rolling_token_windows] + + pad_amnt = 0 + if self.world_size > 1: + # We pad out the external document-level iterator so the inner iterator doesn't hang + mytensor = torch.tensor(len(rolling_token_windows), device=self.device) + gathered = ( + self.accelerator.gather(mytensor).cpu().detach().numpy().tolist() + ) + + pad_amnt = max(gathered) - gathered[self.rank] + if pad_amnt > 0: + rolling_token_windows += pad_amnt * [rolling_token_windows[0]] + + string_nll = self._loglikelihood_tokens( + rolling_token_windows, + disable_tqdm=True, + override_bs=adaptive_batch_size, + ) + + if (self.world_size > 1) and (pad_amnt > 0): + string_nll = [x[0] for x in string_nll[:-pad_amnt]] + else: + # discard is_greedy + string_nll = [x[0] for x in string_nll] + + string_nll = sum(string_nll) + loglikelihoods.append(string_nll) + + return loglikelihoods + + def _loglikelihood_tokens( + self, requests, disable_tqdm: bool = False, override_bs=None + ): + # TODO: implement some kind of efficient-request-middleware that lumps together requests with the same context + res = [] + + def _collate(x): + # the negative sign on len(toks) sorts descending - this has a few advantages: + # - time estimates will always be over not underestimates, which is more useful for planning + # - to know the size of a batch when going through the list, you know the first one is always the batch + # padded context length. 
this is useful to simplify the batching logic and more importantly to make + # automatic adaptive batches much much easier to implement + # - any OOMs will happen right away rather than near the end + + toks = x[1] + x[2] + return -len(toks), tuple(toks) + + re_ord = utils.Reorderer(requests, _collate) + + n_reordered_requests = len(re_ord.get_reordered()) # noqa + # automatic (variable) batch size detection for vectorization + # pull longest context sample from request + + chunks = lm_eval.models.utils.chunks( + re_ord.get_reordered(), + n=self.batch_size, + fn=None, + ) + + for chunk in tqdm(chunks, disable=(disable_tqdm or (self.rank != 0))): + inps = [] + cont_toks_list = [] + inplens = [] + + conts = [] # noqa + encoder_attns = [] # noqa + + padding_len_inp = None + padding_len_cont = None # noqa + # because vectorizing is annoying, we first convert each (context, continuation) pair to padded + # tensors, then we pack them together into a batch, call the model, and then pick it all apart + # again because vectorizing is annoying + + for _, context_enc, continuation_enc in chunk: + # sanity check + assert len(context_enc) > 0 + assert len(continuation_enc) > 0 + assert len(continuation_enc) <= self.max_length + + # how this all works (illustrated on a causal decoder-only setup): + # CTX CONT + # inp 0 1 2 3|4 5 6 7 8 9 <- last token is deleted by inp[:, :-1] + # model \ \ + # logits 1 2 3|4 5 6 7 8 9 <- the ctx half gets tossed out by the + # cont_toks 4 5 6 7 8 9 [:, -len(continuation_enc):, :self.vocab_size] slice + + # when too long to fit in context, truncate from the left + inp = torch.tensor( + (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1], + dtype=torch.long, + device=self.device, + ) + (inplen,) = inp.shape + + padding_len_inp = ( + max(padding_len_inp, inplen) + if padding_len_inp is not None + else inplen + ) + + inps.append(inp) # [1, inp_length] + cont_toks_list.append(continuation_enc) + inplens.append(inplen) + + # create encoder attn mask and batched conts, if seq2seq + call_kwargs = {} + batched_inps = lm_eval.models.utils.pad_and_concat( + padding_len_inp, inps, padding_side="right" + ) # [batch, padding_len_inp] + + multi_logits = F.log_softmax( + self._model_call(batched_inps, **call_kwargs), dim=-1 + ) # [batch, padding_length (inp or cont), vocab] + + for (cache_key, _, _), logits, inplen, cont_toks in zip( + chunk, multi_logits, inplens, cont_toks_list + ): + # Slice to original seq length + contlen = len(cont_toks) + # take only logits in the continuation + # (discard context toks if decoder-only ; discard right-padding) + # also discards + checks for "virtual tokens" in the causal LM's input window + # from prompt/prefix tuning tokens, if applicable + ctx_len = inplen + (logits.shape[0] - padding_len_inp) + logits = self._select_cont_toks(logits, contlen=contlen, inplen=ctx_len) + logits = logits.unsqueeze(0) # [1, seq, vocab] + + # Check if per-token argmax is exactly equal to continuation + greedy_tokens = logits.argmax(dim=-1) + cont_toks = torch.tensor( + cont_toks, dtype=torch.long, device=self.device + ).unsqueeze(0) # [1, seq] + max_equal = (greedy_tokens == cont_toks).all() + + # Obtain log-probs at the corresponding continuation token indices + # last_token_slice = logits[:, -1, :].squeeze(0).tolist() + logits = torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze( + -1 + ) # [1, seq] + + # Answer: (log prob, is-exact-match) + answer = (float(logits.sum()), bool(max_equal)) + + res.append(answer) + + 
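+                # record the answer with the caching hook so a cached or resumed run
+                # can reuse it instead of re-running the model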
self.cache_hook.add_partial("loglikelihood", cache_key, answer) + + return re_ord.get_original(res) + + def generate_until(self, requests, disable_tqdm: bool = False): + res = defaultdict(list) + re_ords = {} + + def _collate(x): + # the negative sign on len(toks) sorts descending - this has a few advantages: + # - time estimates will always be over not underestimates, which is more useful for planning + # - to know the size of a batch when going through the list, you know the first one is always the batch + # padded context length. this is useful to simplify the batching logic and more importantly to make + # automatic adaptive batches much much easier to implement + # - any OOMs will happen right away rather than near the end + toks = self.tok_encode(x[0]) + return -len(toks), x[0] + + # we group requests by their generation_kwargs, + # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling + # in the same batch. + grouper = lm_eval.models.utils.Grouper(requests, lambda x: str(x.args[1])) + for key, reqs in grouper.get_grouped().items(): + # within each set of reqs for given kwargs, we reorder by token length, descending. + re_ords[key] = utils.Reorderer([req.args for req in reqs], _collate) + + pbar = tqdm(total=len(requests), disable=(disable_tqdm or (self.rank != 0))) + + # for each different set of kwargs, we execute all requests, by batch. + for key, re_ord in re_ords.items(): + chunks = lm_eval.models.utils.chunks( + re_ord.get_reordered(), n=self.batch_size + ) + for chunk in tqdm(chunks, disable=self.rank != 0): + contexts, all_gen_kwargs = zip(*chunk) + # we assume all gen kwargs in the batch are the same + # this is safe to assume because the `grouper` object ensures it. + gen_kwargs = all_gen_kwargs[0] + # unpack our keyword arguments. 
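+                # `until` may be given as a str or a list of str; it is validated below,
+                # and the tokenizer's EOS string is always appended as an extra stop sequence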
+ until = None + if isinstance(gen_kwargs, dict): + kwargs = copy.deepcopy(gen_kwargs) # edge case for repeats > 1 + if "until" in kwargs.keys(): + until = kwargs.pop("until") + if isinstance(until, str): + until = [until] + elif not isinstance(until, list): + raise ValueError( + f"Expected `kwargs['until']` to be of type Union[str,list] but got {until}" + ) + else: + raise ValueError( + f"Expected `kwargs` to be of type `dict` but got {kwargs}" + ) + # add EOS token to stop sequences + eos = self.tok_decode(self.eot_token_id) + if not until: + until = [eos] + else: + until.append(eos) + if "max_gen_toks" in kwargs.keys(): + max_gen_toks = kwargs.pop("max_gen_toks") + else: + max_gen_toks = self.max_gen_toks + # first stop sequence is used to halt generation upon encountering + primary_until = [until[0]] + + max_ctx_len = self.max_length - max_gen_toks + + # encode, pad, and truncate contexts for this batch + context_enc, attn_masks = self.tok_batch_encode( + contexts, + left_truncate_len=max_ctx_len, + truncation=self.truncation, + ) + context_enc = context_enc.to(self.device) + attn_masks = attn_masks.to(self.device) + + if "max_length" not in kwargs: + kwargs["max_length"] = context_enc.shape[1] + max_gen_toks + + # perform batched generation + cont = self._model_generate( + context=context_enc, + attention_mask=attn_masks, + stop=primary_until, + **kwargs, + ) + + cont_toks_list = cont.tolist() + for cont_toks, context in zip(cont_toks_list, contexts): + # discard context + left-padding toks if using causal decoder-only LM + cont_toks = cont_toks[context_enc.shape[1] :] + + s = self.tok_decode(cont_toks) + + # use secondary stop seqs to cut off should-have-been-stopped content post-hoc + for term in until: + if len(term) > 0: + # ignore '' separator, + # for seq2seq case where self.tok_decode(self.eot_token_id) = '' + s = s.split(term)[0] + + res[key].append(s) + + self.cache_hook.add_partial( + "generate_until", (context, gen_kwargs), s + ) + pbar.update(1) + # reorder this group of results back to original unsorted form + res[key] = re_ord.get_original(res[key]) + + pbar.close() + + return grouper.get_original(res) diff --git a/lm-evaluation-harness/lm_eval/models/openai_completions.py b/lm-evaluation-harness/lm_eval/models/openai_completions.py new file mode 100644 index 0000000000000000000000000000000000000000..87e4bd2bbe26ec015f60f7c26b8d2ad16c976cee --- /dev/null +++ b/lm-evaluation-harness/lm_eval/models/openai_completions.py @@ -0,0 +1,481 @@ +import copy +import os +from collections import defaultdict +from importlib.util import find_spec +from typing import List, Literal, Optional, Tuple + +from tqdm import tqdm + +import lm_eval.models.utils +from lm_eval import utils +from lm_eval.api.model import LM, TemplateLM +from lm_eval.api.registry import register_model +from lm_eval.models.utils import retry_on_specific_exceptions +from lm_eval.utils import eval_logger + + +def get_result(response, ctxlen: int) -> Tuple[float, bool]: + """Process results from OpenAI API response. 
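+    (Expects a single completion choice produced with `echo=True` and `logprobs`
+    enabled, so that prompt-token logprobs and top-logprobs are available.)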
+ + :param response: dict + OpenAI API Response + :param ctxlen: int + Length of context (so we can slice them away and only keep the predictions) + :return: + continuation_logprobs: np.array + Log probabilities of continuation tokens + is_greedy: bool + whether argmax matches given continuation exactly + """ + is_greedy = True + logprobs = response.logprobs.token_logprobs + continuation_logprobs = sum(logprobs[ctxlen:]) + + for i in range(ctxlen, len(response.logprobs.token_logprobs)): + token = response.logprobs.token_logprobs[i] + top_tokens = response.logprobs.top_logprobs[i] + top_token = max(top_tokens.keys(), key=lambda x: top_tokens[x]) + if top_token != token: + is_greedy = False + break + + return continuation_logprobs, is_greedy + + +def oa_completion(client, chat: bool = False, **kwargs): + """Query OpenAI API for completion. + + Retry with back-off until they respond + """ + if not find_spec("openai") or not find_spec("tiktoken"): + raise Exception( + "attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. " + "Please install these via `pip install lm-eval[openai]` or `pip install -e .[openai]`" + ) + else: + import openai + + def _exception_callback(e: Exception, sleep_time: float) -> None: + import traceback + + traceback.print_exc() + + @retry_on_specific_exceptions( + on_exceptions=[openai.OpenAIError], + max_retries=None, # retry forever, consider changing + on_exception_callback=_exception_callback, + ) + def completion(): + if chat: + return client.chat.completions.create(**kwargs) + else: + return client.completions.create(**kwargs) + + return completion() + + +@register_model("openai-completions", "local-completions") +class OpenaiCompletionsLM(TemplateLM): + _DEFAULT_MAX_LENGTH = 2048 + + def __init__( + self, + model: str, + base_url: str = None, + tokenizer: Optional[str] = None, + tokenizer_backend: Literal["tiktoken", "huggingface"] = "tiktoken", + truncate: bool = False, + max_gen_toks: int = 256, + batch_size: int = 1, + seed: int = 1234, + max_length: Optional[int] = None, + ) -> None: + """ + + :param engine: str + OpenAI API engine (e.g. gpt-3.5-turbo-instruct) + :param truncate: bool + Truncate input if too long (if False and input is too long, throw error) + """ + super().__init__() + self.seed = seed + try: + import openai # noqa: E401 + import tiktoken + except ModuleNotFoundError: + raise Exception( + "attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. \ + please install these via `pip install lm-eval[openai]` or `pip install -e .\"[openai]\"`", + ) + self.model = model + self.base_url = base_url + self.tokenizer_backend = tokenizer_backend + self.truncate = truncate + self._batch_size = int(batch_size) + self._max_gen_toks = max_gen_toks + self._max_length = max_length + + # if we have a local model, use HF tokenizer over tiktoken + if self.tokenizer_backend == "huggingface": + import transformers # noqa: E401 + + self.tokenizer = transformers.AutoTokenizer.from_pretrained( + tokenizer if tokenizer else self.model + ) + self.vocab_size = self.tokenizer.vocab + self.end_of_text_token_id = self.tokenizer.eos_token + elif self.tokenizer_backend == "tiktoken": + if self.base_url: + eval_logger.warning( + f"Passed `base_url={self.base_url}` but using Tiktoken tokenizer backend. " + "Pass `tokenizer_backend=huggingface` and provide the HF tokenizer name if your model does not use Tiktoken." 
+ ) + + self.tokenizer = tiktoken.encoding_for_model(self.model) + self.vocab_size = self.tokenizer.n_vocab + self.end_of_text_token_id = self.tokenizer.eot_token + else: + raise ValueError( + f"Expected tokenizer_backend to be one of ['tiktoken', 'huggingface'] but got {self.tokenizer_backend}" + ) + + # Read from environment variable OPENAI_API_KEY + # Set to EMPTY for local + openai.api_key = os.environ["OPENAI_API_KEY"] + if self.base_url: + self.client = openai.OpenAI(base_url=self.base_url) + else: + self.client = openai.OpenAI() + + @property + def eot_token_id(self): + return self.end_of_text_token_id + + @property + def max_length(self) -> int: + if self._max_length: + return self._max_length + else: + return self._DEFAULT_MAX_LENGTH + + @property + def max_gen_toks(self) -> int: + return self._max_gen_toks + + @property + def batch_size(self) -> int: + return self._batch_size + + @property + def device(self): + # Isn't used because we override _loglikelihood_tokens + raise NotImplementedError() + + def tok_encode(self, string: str, **kwargs) -> List[int]: + return self.tokenizer.encode(string) + + def tok_decode(self, tokens: List[int]) -> str: + return self.tokenizer.decode(tokens) + + def _loglikelihood_tokens( + self, requests, disable_tqdm: bool = False + ) -> List[Tuple[float, bool]]: + res = [] + + def _collate(x): + # this doesn't efficiently handle last-token differences yet, but those are kinda annoying because + # it's not guaranteed that the 100 or so logprobs we get to see actually contain all the continuations + # we care about, and so we need some kind of backup for when it isn't + toks = x[1] + x[2] + return -len(toks), tuple(toks) + + re_ord = utils.Reorderer(requests, _collate) + + for chunk in tqdm( + list(lm_eval.models.utils.chunks(re_ord.get_reordered(), self.batch_size)), + disable=disable_tqdm, + ): + inps = [] + ctxlens = [] + for cache_key, context_enc, continuation_enc in chunk: + # max_length+1 because the API takes up to 2049 tokens, including the first context token + inp = (context_enc + continuation_enc)[-(self.max_length + 1) :] + # TODO: the logic is much simpler if we just look at the length of continuation tokens + ctxlen = len(context_enc) - max( + 0, len(context_enc) + len(continuation_enc) - (self.max_length + 1) + ) + + inps.append(inp) + ctxlens.append(ctxlen) + + response = oa_completion( + client=self.client, + model=self.model, + prompt=inps, + echo=True, + max_tokens=0, + temperature=0.0, + logprobs=10, + seed=self.seed, + ) + + for resp, ctxlen, (cache_key, context_enc, continuation_enc) in zip( + response.choices, ctxlens, chunk + ): + answer = get_result(resp, ctxlen) + + res.append(answer) + + # partial caching + if cache_key is not None: + self.cache_hook.add_partial("loglikelihood", cache_key, answer) + return re_ord.get_original(res) + + def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]: + if not requests: + return [] + res = [] + requests = [req.args for req in requests] + + def _collate(x): + toks = self.tok_encode(x[0]) + return len(toks), x[0] + + re_ord = utils.Reorderer(requests, _collate) + + def sameuntil_chunks(xs, size): + ret = [] + lastuntil = xs[0][1] + for x in xs: + if len(ret) >= size or x[1] != lastuntil: + yield ret, lastuntil + ret = [] + lastuntil = x[1] + ret.append(x) + + if ret: + yield ret, lastuntil + + # todo: more intelligent batching for heterogeneous `until` + for chunk, request_args in tqdm( + list(sameuntil_chunks(re_ord.get_reordered(), self.batch_size)), + 
disable=disable_tqdm, + ): + inps = [] + self._max_gen_toks = request_args.get("max_gen_toks", self.max_gen_toks) + for context, _ in chunk: + context_enc = self.tok_encode(context) + inp = context_enc[-(self.max_length - self.max_gen_toks) :] + inps.append(inp) + + until = request_args.get("until", ["<|endoftext|>"]) + request_args["temperature"] = request_args.get("temperature", 0) + + response = oa_completion( + client=self.client, + model=self.model, + prompt=inps, + max_tokens=self.max_gen_toks, + stop=until, + seed=self.seed, + **{ + k: v + for k, v in request_args.items() + if k not in {"do_sample", "max_gen_toks", "until"} + }, + ) + for resp, (context, args_) in zip(response.choices, chunk): + s = getattr(resp, "text") + + until_ = until + + for term in until_: + if len(term) > 0: + s = s.split(term)[0] + + # partial caching + self.cache_hook.add_partial( + "generate_until", (context, {"until": until_}), s + ) + + res.append(s) + return re_ord.get_original(res) + + def _model_call(self, inps): + # Isn't used because we override _loglikelihood_tokens + raise NotImplementedError() + + def _model_generate(self, context, max_length, eos_token_id): + # Isn't used because we override generate_until + raise NotImplementedError() + + def loglikelihood_rolling( + self, requests, disable_tqdm: bool = False + ) -> List[float]: + loglikelihoods = [] + + for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm): + rolling_token_windows = list( + map( + utils.make_disjoint_window, + utils.get_rolling_token_windows( + token_list=self.tok_encode(string), + prefix_token=self.eot_token_id, + max_seq_len=self.max_length, + context_len=1, + ), + ) + ) + + # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case + rolling_token_windows = [(None,) + x for x in rolling_token_windows] + + string_nll = self._loglikelihood_tokens( + rolling_token_windows, + disable_tqdm=True, + ) + + # discard is_greedy + string_nll = [x[0] for x in string_nll] + + string_nll = sum(string_nll) + loglikelihoods.append(string_nll) + return loglikelihoods + + +@register_model("openai-chat-completions", "local-chat-completions") +class OpenaiChatCompletionsLM(LM): + def __init__( + self, + model: str = "gpt-3.5-turbo", # GPT model or Local model using HuggingFace model paths + base_url: str = None, + truncate: bool = False, + **kwargs, + ) -> None: + """ + + :param model: str + Implements an OpenAI-style chat completion API for + accessing both OpenAI OR locally-hosted models using + HuggingFace Tokenizer + OpenAI API model (e.g. gpt-3.5-turbo) + using the **gen_kwargs passed on init + :param truncate: bool + Truncate input if too long (if False and input is too long, throw error) + """ + super().__init__() + try: + import openai # noqa: E401 + except ModuleNotFoundError: + raise Exception( + "attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. 
\ + please install these via `pip install lm-eval[openai]` or `pip install -e .[openai]`", + ) + self.model = model + self.base_url = base_url + self.truncate = truncate + + # Read from environment variable OPENAI_API_KEY + # Set to EMPTY for local + if self.base_url: + self.client = openai.OpenAI(base_url=self.base_url) + else: + self.client = openai.OpenAI() # openai.AsyncOpenAI() + + @property + def max_length(self) -> int: + # Note: the OpenAI API supports up to 2049 tokens, with the first token being the first input token + return 2048 + + @property + def max_gen_toks(self) -> int: + return 256 + + @property + def batch_size(self): + # Isn't used because we override _loglikelihood_tokens + raise NotImplementedError() + + @property + def device(self): + # Isn't used because we override _loglikelihood_tokens + raise NotImplementedError() + + def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]: + res = defaultdict(list) + re_ords = {} + + # we group requests by their generation_kwargs, + # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling + # in the same batch. + grouper = lm_eval.models.utils.Grouper(requests, lambda x: str(x.args[1])) + for key, reqs in grouper.get_grouped().items(): + # within each set of reqs for given kwargs, we reorder by token length, descending. + re_ords[key] = utils.Reorderer( + [req.args for req in reqs], lambda x: (-len(x[0]), x[0]) + ) + + pbar = tqdm(total=len(requests), disable=(disable_tqdm or (self.rank != 0))) + for key, re_ord in re_ords.items(): + # n needs to be 1 because messages in + # chat completion are not batch but + # is regarded as a single conversation. + chunks = lm_eval.models.utils.chunks(re_ord.get_reordered(), n=1) + for chunk in chunks: + contexts, all_gen_kwargs = zip(*chunk) + inps = [{"role": "user", "content": context} for context in contexts] + + gen_kwargs = all_gen_kwargs[0] + until = None + if isinstance(kwargs := copy.deepcopy(gen_kwargs), dict): + if "do_sample" in kwargs.keys(): + kwargs.pop("do_sample") + if "until" in kwargs.keys(): + until = kwargs.pop("until") + if isinstance(until, str): + until = [kwargs] + elif not isinstance(until, list): + raise ValueError( + f"Expected repr(kwargs['until']) to be of type Union[str, list] but got {until}" + ) + kwargs["stop"] = until + kwargs["max_tokens"] = kwargs.pop("max_gen_toks", self.max_gen_toks) + else: + raise ValueError( + f"Expected repr(kwargs) to be of type repr(dict) but got {kwargs}" + ) + + response = oa_completion( + client=self.client, + chat=True, + messages=inps, + model=self.model, + **kwargs, + ) + + for resp, (context, args_) in zip(response.choices, chunk): + s = resp.message.content + + if until is not None: + for term in until: + if len(term) > 0: + s = s.split(term)[0] + + res[key].append(s) + + self.cache_hook.add_partial( + "generate_until", (context, {"until": until}), s + ) + pbar.update(1) + # reorder this group of results back to original unsorted form + res[key] = re_ord.get_original(res[key]) + + pbar.close() + + return grouper.get_original(res) + + def loglikelihood(self, requests, disable_tqdm: bool = False): + raise NotImplementedError("No support for logits.") + + def loglikelihood_rolling(self, requests, disable_tqdm: bool = False): + raise NotImplementedError("No support for logits.") diff --git a/lm-evaluation-harness/lm_eval/models/optimum_lm.py b/lm-evaluation-harness/lm_eval/models/optimum_lm.py new file mode 100644 index 
0000000000000000000000000000000000000000..255a05b33a331835ae8951119710b26b66a4585c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/models/optimum_lm.py @@ -0,0 +1,69 @@ +from importlib.util import find_spec +from pathlib import Path + +from lm_eval.api.registry import register_model +from lm_eval.models.huggingface import HFLM + + +@register_model("openvino") +class OptimumLM(HFLM): + """ + Optimum Intel provides a simple interface to optimize Transformer models and convert them to \ + OpenVINO™ Intermediate Representation (IR) format to accelerate end-to-end pipelines on \ + Intel® architectures using OpenVINO™ runtime. + """ + + def __init__( + self, + device="cpu", + **kwargs, + ) -> None: + if "backend" in kwargs: + # optimum currently only supports causal models + assert ( + kwargs["backend"] == "causal" + ), "Currently, only OVModelForCausalLM is supported." + + self.openvino_device = device + + super().__init__( + device=self.openvino_device, + backend=kwargs.pop("backend", "causal"), + **kwargs, + ) + + def _create_model( + self, + pretrained: str, + revision="main", + dtype="auto", + trust_remote_code=False, + **kwargs, + ) -> None: + if not find_spec("optimum"): + raise Exception( + "package `optimum` is not installed. Please install it via `pip install optimum[openvino]`" + ) + else: + from optimum.intel.openvino import OVModelForCausalLM + + model_kwargs = kwargs if kwargs else {} + model_file = Path(pretrained) / "openvino_model.xml" + if model_file.exists(): + export = False + else: + export = True + kwargs["ov_config"] = { + "PERFORMANCE_HINT": "LATENCY", + "NUM_STREAMS": "1", + "CACHE_DIR": "", + } + + self._model = OVModelForCausalLM.from_pretrained( + pretrained, + revision=revision, + trust_remote_code=trust_remote_code, + export=export, + device=self.openvino_device.upper(), + **model_kwargs, + ) diff --git a/lm-evaluation-harness/lm_eval/models/textsynth.py b/lm-evaluation-harness/lm_eval/models/textsynth.py new file mode 100644 index 0000000000000000000000000000000000000000..2ede0b44e036b5eb78525dffc4cea49e45a4c092 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/models/textsynth.py @@ -0,0 +1,171 @@ +""" TextSynth API +Implementation provided by Fabrice Bellard: + https://github.com/EleutherAI/lm-evaluation-harness/issues/295 + +In order to use the API, you must have a valid TextSynth account and +enough credits. + +Example usage: + + python main.py --model textsynth --model_args engine=gptj_6B --no_cache --tasks piqa + +Homepage: https://textsynth.com/index.html +""" +import logging +import os + +import requests as _requests +from tqdm import tqdm + +from lm_eval.api.model import LM +from lm_eval.api.registry import register_model +from lm_eval.models.utils import retry_on_specific_exceptions + + +logger = logging.getLogger(__name__) + + +def textsynth_completion(**kwargs): + """Query TextSynth API for completion. + Retry with back-off until they respond. + """ + + def _exception_callback(e: Exception, sleep_time: float) -> None: + import traceback + + traceback.print_exc() + + @retry_on_specific_exceptions( + on_exceptions=[_requests.exceptions.RequestException], + max_retries=None, # retry forever, consider changing + on_exception_callback=_exception_callback, + ) + def completion(): + return _requests.post(**kwargs) + + return completion() + + +@register_model("textsynth") +class TextSynthLM(LM): + def __init__(self, engine, truncate: bool = False, **kwargs) -> None: + """ + :param engine: str + TextSynth API engine (e.g. 
`gptj_6B`) + :param truncate: bool + Truncate input if too long (if False and input is too long, throw error) + """ + super().__init__() + + self.engine = engine + self.truncate = truncate + self.api_url = "https://api.textsynth.com" + # Read from environment variable TEXTSYNTH_API_SECRET_KEY + self.api_key = os.environ["TEXTSYNTH_API_SECRET_KEY"] + + @property + def eot_token_id(self): + # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until + raise NotImplementedError() + + @property + def max_length(self) -> int: + # NOTE: Turn on truncation to avoid errors on long inputs. + return 2048 + + @property + def max_gen_toks(self) -> int: + return 256 + + @property + def batch_size(self): + # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until + raise NotImplementedError() + + @property + def device(self): + # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until + raise NotImplementedError() + + def tok_encode(self, string: str): + # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until + raise NotImplementedError() + + def tok_decode(self, tokens): + # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until + raise NotImplementedError() + + def loglikelihood(self, requests, disable_tqdm: bool = False): + res = [] + for context, continuation in tqdm(requests, disable=disable_tqdm): + response = textsynth_completion( + url=self.api_url + "/v1/engines/" + self.engine + "/logprob", + headers={"Authorization": "Bearer " + self.api_key}, + json={"context": context, "continuation": continuation}, + ) + resp = response.json() + if "logprob" in resp: + logprob = resp["logprob"] + is_greedy = resp["is_greedy"] + res.append((logprob, is_greedy)) + + self.cache_hook.add_partial( + "loglikelihood", (context, continuation), (logprob, is_greedy) + ) + else: + logger.error( + f"The following response does not contain `logprobs`. Got:\n{resp}" + ) + assert False + return res + + def loglikelihood_rolling(self, requests, disable_tqdm: bool = False): + # TODO: The TextSynth API does not support tokenized inputs so we cannot + # manually partition long contexts into smaller rolling windows as + # done for other models derived from `BaseLM`. Override this method + # with a windowing scheme that works for direct string inputs. + raise NotImplementedError( + "`loglikelihood_rolling` is currently not supported due to lack of " + "input tokenization support from TextSynth." + ) + + def generate_until(self, requests, disable_tqdm: bool = False): + if not requests: + return [] + + res = [] + for request in tqdm(requests, disable=disable_tqdm): + inp = request[0] + request_args = request[1] + until = request_args["until"] + response = textsynth_completion( + url=self.api_url + "/v1/engines/" + self.engine + "/completions", + headers={"Authorization": "Bearer " + self.api_key}, + json={ + "prompt": inp, + "max_tokens": self.max_gen_toks, + "top_k": 1, + "stop": until, + }, + ) + resp = response.json() + if "text" in resp: + s = resp["text"] + res.append(s) + + self.cache_hook.add_partial("generate_until", (inp, request_args), s) + else: + logger.error( + "The following response does not contain generated `text`. 
" + "Got:\n{resp}" + ) + assert False + return res + + def _model_call(self, inps): + # Isn't used because we override _loglikelihood_tokens + raise NotImplementedError() + + def _model_generate(self, context, max_length, eos_token_id): + # Isn't used because we override generate_until + raise NotImplementedError() diff --git a/lm-evaluation-harness/lm_eval/models/utils.py b/lm-evaluation-harness/lm_eval/models/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..09818f4eddcae3749d93adf48e40c4e657ab73e3 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/models/utils.py @@ -0,0 +1,615 @@ +import collections +import fnmatch +import gc +import itertools +import time +from functools import wraps +from typing import ( + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + Literal, + Optional, + Tuple, + Type, + Union, +) + +import torch +import transformers + +from lm_eval.utils import eval_logger + + +def chunks(iter, n: int = 0, fn=None): + """ + Divides an iterable into chunks of specified size or based on a given function. + Useful for batching + + Parameters: + - iter: The input iterable to be divided into chunks. + - n: An integer representing the size of each chunk. Default is 0. + - fn: A function that takes the current index and the iterable as arguments and returns the size of the chunk. Default is None. + + Returns: + An iterator that yields chunks of the input iterable. + + Example usage: + ``` + data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for chunk in chunks(data, 3): + print(chunk) + ``` + Output: + ``` + [1, 2, 3] + [4, 5, 6] + [7, 8, 9] + [10] + ``` + """ + arr = [] + for i, x in enumerate(iter): + arr.append(x) + if len(arr) == (fn(i, iter) if fn else n): + yield arr + arr = [] + + if arr: + yield arr + + +class MultiChoice: + def __init__(self, choices) -> None: + self.choices = choices + + # Simple wildcard support (linux filename patterns) + def __contains__(self, values) -> bool: + for value in values.split(","): + if len(fnmatch.filter(self.choices, value)) == 0: + eval_logger.info("Available tasks to choose:") + for choice in self.choices: + eval_logger.info(f" - {choice}") + raise ValueError("'{}' is not in task list".format(value)) + return True + + def __iter__(self) -> Iterator: + for choice in self.choices: + yield choice + + +class Grouper: + """ + takes an array `arr` and function `fn` and returns a dictionary + with keys fn(ob) for each ob in `arr` and with values `self.arr[key]` a list of all + objects in `arr` satisfying `key == fn(ob)`. + """ + + def __init__(self, arr, fn) -> None: + # self.orig_arr = arr + self.size = len(arr) + arr = list(enumerate(arr)) + + def group_return_dict(arr, fn): + res = collections.defaultdict(list) + + for ob in arr: + res[fn(ob)].append(ob) + return res + + arr = group_return_dict(arr, lambda x: fn(x[1])) + + # self.arr has format Dict[Tuple[int, ]] + self.arr = arr + self._grouped = None + + def get_grouped(self): + # return the contents but not indices for our grouped dict. + if self._grouped: + return self._grouped + grouped = {} + for key in self.arr.keys(): + # drop the index from each element of self.arr + grouped[key] = [y[1] for y in self.arr[key]] + self._grouped = grouped + return grouped + + def get_original(self, grouped_dict): + # take in a grouped dictionary with e.g. results for each key listed + # in the same order as the instances in `self.arr`, and + # return the results in the same (single list) order as `self.orig_arr`. 
+ res = [None] * self.size + cov = [False] * self.size + # orig = [None] * self.size + + assert grouped_dict.keys() == self.arr.keys() + + for key in grouped_dict.keys(): + for (ind, _), v in zip(self.arr[key], grouped_dict[key]): + res[ind] = v + cov[ind] = True + # orig[ind] = _ + + assert all(cov) + # assert orig == self.orig_arr + + return res + + +def pad_and_concat( + max_length: int, + tensors: List[torch.Tensor], + padding_side: Literal["right", "left"] = "right", +): + """ + Method for padding a list of tensors given the maximum tensor + length in the batch. Used for batching inputs and continuations in + seq2seq models. + """ + assert ( + padding_side == "left" or padding_side == "right" + ), f"Unrecognized padding type: '{padding_side}' not 'left' or 'right'" + + for i, tensor in enumerate(tensors): + if len(tensor.shape) == 2: + tensor = tensor.squeeze(0) # squeeze, in case passed [1, seq] size + tensor_len = tensor.shape[0] + if tensor_len < max_length: + if padding_side == "right": + # right-pad + tensors[i] = torch.cat( + [ + tensor, # [seq] + torch.zeros( + max_length - tensor_len, + dtype=torch.long, + device=tensor.device, + ), # [padding_length - seq] + ], + dim=0, + ).unsqueeze(0) + else: + # left-pad + tensors[i] = torch.cat( + [ + torch.zeros( + max_length - tensor_len, + dtype=torch.long, + device=tensor.device, + ), # [padding_length - seq] + tensor, # [seq] + ], + dim=0, + ).unsqueeze(0) + else: + tensors[i] = tensor.unsqueeze(0) + + return torch.cat(tensors, dim=0) + + +def clear_torch_cache() -> None: + gc.collect() + torch.cuda.empty_cache() + + +def get_dtype(dtype: Union[str, torch.dtype]) -> torch.dtype: + """Converts `dtype` from `str` to torch.dtype when possible. Does not use an instantiated HF AutoConfig""" + if isinstance(dtype, str) and dtype != "auto": + # Convert `str` args torch dtype: `float16` -> `torch.float16` + _torch_dtype = getattr(torch, dtype) + else: + _torch_dtype = dtype + return _torch_dtype + + +class MultiTokenEOSCriteria(transformers.StoppingCriteria): + """Criteria to stop on the specified multi-token sequence.""" + + def __init__( + self, + sequence: str, + tokenizer: transformers.PreTrainedTokenizer, + initial_decoder_input_length: int, + batch_size: int, + ) -> None: + self.initial_decoder_input_length = initial_decoder_input_length + self.done_tracker = [False] * batch_size + self.sequence = sequence + self.sequence_ids = tokenizer.encode(sequence, add_special_tokens=False) + # print(sequence, self.sequence_ids) + # we look back for 2 more tokens than it takes to encode our stop sequence + # because tokenizers suck, and a model might generate `['\n', '\n']` but our `sequence` is `['\n\n']` + # and we don't want to mistakenly not stop a generation because our + # (string) stop sequence was output in a different tokenization + + # NOTE: there is a minor danger that this will end up looking back 2 tokens into the past, into the inputs to the model, + # and stopping generation immediately as a result. With only 2 extra tokens of lookback, this risk is minimized + # Additionally, in lookback_ids_batch we should prevent ever looking back into the inputs as described. 
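+        # Concrete trace (hypothetical tokenization): if `sequence` is "\n\n" and it
+        # encodes to a single token id, the lookback window below is 1 + 2 = 3 ids;
+        # __call__ decodes those trailing ids back to text and checks for the stop
+        # string as a substring, which also catches the case where the model emits
+        # two separate "\n" tokens instead of the single "\n\n" token.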
+ self.sequence_id_len = len(self.sequence_ids) + 2 + self.tokenizer = tokenizer + + def __call__(self, input_ids, scores, **kwargs) -> bool: + # For efficiency, we compare the last n tokens where n is the number of tokens in the stop_sequence + lookback_ids_batch = input_ids[:, self.initial_decoder_input_length :] + + lookback_ids_batch = lookback_ids_batch[:, -self.sequence_id_len :] + + lookback_tokens_batch = self.tokenizer.batch_decode(lookback_ids_batch) + + for i, done in enumerate(self.done_tracker): + if not done: + self.done_tracker[i] = self.sequence in lookback_tokens_batch[i] + return False not in self.done_tracker + + +def stop_sequences_criteria( + tokenizer: transformers.PreTrainedTokenizer, + stop_sequences: List[str], + initial_decoder_input_length: int, + batch_size: int, +) -> transformers.StoppingCriteriaList: + return transformers.StoppingCriteriaList( + [ + *[ + MultiTokenEOSCriteria( + sequence, tokenizer, initial_decoder_input_length, batch_size + ) + for sequence in stop_sequences + ], + ] + ) + + +def undistribute(iterable): + """ + Undoes https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.distribute . + + Re-interleaves results that have been split using more_itertools.distribute: + >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6]) + >>> list(group_1) + [1, 3, 5] + >>> list(group_2) + [2, 4, 6] + >>> undistribute([group_1, group_2]) + [1, 2, 3, 4, 5, 6] + + Handles non-uniform component lengths: + + >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7]) + >>> [list(c) for c in children] + [[1, 4, 7], [2, 5], [3, 6]] + >>> undistribute(children) + [1, 2, 3, 4, 5, 6, 7] + + Also handles when some iterables are empty: + + >>> children = distribute(5, [1, 2, 3]) + >>> [list(c) for c in children] + [[1], [2], [3], [], []] + >>> undistribute(children) + [1, 2, 3] + + """ + + return [ + x + for x in itertools.chain.from_iterable( + itertools.zip_longest(*[list(x) for x in iterable]) + ) + if x is not None + ] + + +def retry_on_specific_exceptions( + on_exceptions: List[Type[Exception]], + max_retries: Optional[int] = None, + backoff_time: float = 3.0, + backoff_multiplier: float = 1.5, + on_exception_callback: Optional[Callable[[Exception, float], Any]] = None, +): + """Retry on an LLM Provider's rate limit error with exponential backoff + For example, to use for OpenAI, do the following: + ``` + from openai import RateLimitError + + # Recommend specifying max_retries to avoid infinite loops! + @retry_on_specific_exceptions([RateLimitError], max_retries=3) + def completion(...): + # Wrap OpenAI completion function here + ... + ``` + """ + + def decorator(func: Callable): + @wraps(func) + def wrapper(*args, **kwargs): + sleep_time = backoff_time + attempt = 0 + while max_retries is None or attempt < max_retries: + try: + return func(*args, **kwargs) + except tuple(on_exceptions) as e: + if on_exception_callback is not None: + on_exception_callback(e, sleep_time) + time.sleep(sleep_time) + sleep_time *= backoff_multiplier + attempt += 1 + + return wrapper + + return decorator + + +class Collator: + """ + A class for reordering and batching elements of an array. + + This class allows for sorting an array based on a provided sorting function, grouping elements based on a grouping function, and generating batches from the sorted and grouped data. + + Objects of this class have the group_by attribute which determines the method for grouping + the data while batching it. 
Three options include "gen_kwargs", "contexts", or None: + If group_by == "gen_kwargs" then requests will be grouped by gen_kwargs + If group_by == "contexts" then requests will be grouped by context + cont[:-1] + If None then requests will just be reordered by length descending. + """ + + def __init__( + self, + arr: List, + sort_fn: Callable = lambda x: x, + group_fn: Callable = lambda x: x[1], + group_by: Union[Literal["gen_kwargs", "contexts"], None] = None, + ) -> None: + self._group_by = group_by + # 0 indices are enumerated indices. Apply functions to original arr. + self._sort_fn = lambda x: sort_fn(x[1]) + self._group_fn = lambda x: group_fn(x[1]) + self._reorder_indices: List = [] + self._size = len(arr) + self._arr_with_indices: Union[Dict, Tuple[Tuple[int, Any], ...]] = tuple( + enumerate(arr) + ) # [indices, (arr)] + if self._group_by == "contexts": + self._group_by_context() + elif self._group_by == "gen_kwargs": + self._group_by_index() + + def _group_by_index(self) -> None: + """Group the elements of a list based on their indices.""" + self._arr_with_indices = self.group( + self._arr_with_indices, fn=self._group_fn, group_by="gen_kwargs" + ) + + def _group_by_context(self) -> None: + """Group the array with indices by context.""" + self._arr_with_indices = self.group( + self._arr_with_indices, fn=self._group_fn, group_by="contexts" + ) + + def get_batched(self, n: int = 1, batch_fn: Optional[Callable] = None) -> Iterator: + """ + Generates and yields batches from the reordered array. The method of grouping and batching + depends on the parameter `group_by`. + If `group_by` is set to "gen_kwargs", it will batch the + re-ordered values with same gen_kwargs for each batch. + If `group_by` is "contexts", it caches the requests by context before batching. + If `group_by` is neither "gen_kwargs" nor "contexts", it yields the reordered array + + Parameters: + - n (int): The size of each batch. Defaults to 1. + - batch_fn ([Callable[[int, Iterable], int]] | None): A function to determine the size of + each batch. Optional, defaults to None. + + Returns: + Iterator: An iterator over batches of reordered elements grouped as per the `group_by` + attribute. + + Yields: + List of batched elements according to the `group_by` attribute. + """ + if self._group_by == "gen_kwargs": + for ( + key, + values, + ) in self._arr_with_indices.items(): # type: ignore + values = self._reorder(values) + batch = self.get_chunks(values, n=n, fn=batch_fn) + yield from batch + elif self._group_by == "contexts": + # Get one sample from each key + values = self._reorder( + [value[0] for value in self._arr_with_indices.values()] + ) + batch = self.get_chunks(values, n=n, fn=batch_fn) + yield from batch + else: + values = self._reorder(self._arr_with_indices) # type: ignore + batch = self.get_chunks(values, n=n, fn=batch_fn) + yield from batch + + def get_cache( + self, + req_str: Tuple[str, str] = None, + cxt_toks: List[int] = None, + cont_toks: List[int] = None, + logits: torch.Tensor = None, + ) -> Iterator[Tuple[Tuple[str, str], List[int], torch.Tensor]]: + """ + Retrieves cached single-token continuations and their associated arguments, updating indices as necessary. + + The behavior of this function varies depending on how the `group_by` attribute is set: + + - When `group_by` is "contexts": + The function identifies single-token continuations by checking for keys that equate to + [context+continuation][-1] and logs the indices for re-ordering. 
+ In this mode, this function can work in two scenarios: + + 1. Cache Hit - Single Match: + If a single matching context-continuation pair is found in the cache, + the function yields the original arguments. + + 2. Cache Hit - Multiple Matches: + If multiple matching context-continuation pairs are found in the cache, + the function expands the logits batch dimension to match the number of cache hits. + It updates the original requests and continuation tokens. + + - When `group_by` is not set to "contexts": + This method yields the original arguments, logits and continuation tokens, + without checking for one-token continuations. + + Parameters: + - req_str (tuple[str, str]): Original strings used for CachingLM. + - cxt_toks (list[int]): Full context tokens used for lookup. + - cont_toks (list[int]): Continuation tokens for which logits were generated. + - logits (torch.Tensor [1, seq_length, vocab_size]): Logits generated by the model given context and continuation keys. + + Yields: + - Iterator: + - req_str (tuple[str, str]): strings used for CachingLM. + - cont_toks (list[int]) : continuation tokens. + - logits (torch.Tensor [1, seq_length, vocab_size]): The original logits (repeated cache hit times) + """ + if self._group_by == "contexts": + cache_hit: List[ + Tuple[int, Tuple[Tuple[str, str], List[int], List[int]]] + ] = self._arr_with_indices.pop(tuple(cxt_toks + cont_toks[:-1])) + if (cache_size := len(cache_hit)) == 1: + self._reorder_indices.extend(x[0] for x in cache_hit) + yield req_str, cont_toks, logits + else: + # If we have matching requests then expand the batch dimension (no-op) and + # yield each along with its corresponding args. + multilogits = logits.expand(cache_size, -1, -1).chunk(cache_size) + indices, req_str, cont_toks = zip( + *[(x[0], x[1][0], x[-1][-1]) for x in cache_hit] + ) + self._reorder_indices.extend(indices) + for c_key, cont_tok, logit in zip(req_str, cont_toks, multilogits): + yield c_key, cont_tok, logit + else: + yield req_str, cont_toks, logits + + def _reorder(self, arr: Union[List, Tuple[Tuple[int, Any], ...]]) -> Iterator: + """ + Reorders the elements in the array based on the sorting function. + + Parameters: + - arr (list | tuple[tuple[int, Any], ...]]): The array or iterable to be reordered. + + Yields: + Iterator + """ + arr = sorted(arr, key=self._sort_fn) + if not self._group_by == "contexts": + # If grouped by contexts then indices will be set in get_cache() + self._reorder_indices.extend([x[0] for x in arr]) + yield from [x[1] for x in arr] + + def get_original(self, newarr: List) -> List: + """ + Restores the original order of elements from the reordered list. + + Parameters: + - newarr (list): The reordered array. + + Returns: + list: The array with elements restored to their original order. + """ + res = [None] * self._size + cov = [False] * self._size + + for ind, v in zip(self._reorder_indices, newarr): + res[ind] = v + cov[ind] = True + + assert all(cov) + + return res + + def __len__(self): + return self._size + + @staticmethod + def group( + arr: Iterable, + fn: Callable, + group_by: Literal["gen_kwargs", "contexts"] = "gen_kwargs", + ) -> dict: + """ + Groups elements of an iterable based on a provided function. + + + The `group_by` parameter determines the method of grouping. + If `group_by` is "contexts", the elements are grouped by [context + cont][:-1]. + If `group_by` is "gen_kwargs", the elements are grouped based on the gen_kwargs dict. + + Parameters: + - arr (Iterable): The iterable to be grouped. 
+ - fn (Callable): The function to determine the grouping. + - values (bool): If True, returns the values of the group. Defaults to False. + + Returns: + Iterator: An iterable of grouped elements. + """ + res = collections.defaultdict(list) + for ob in arr: + # where ob == [context + cont] + if group_by == "contexts": + res[tuple(fn(ob))].append(ob) + else: + try: + hashable_dict = tuple( + ( + key, + tuple(value) + if isinstance(value, collections.abc.Iterable) + else value, + ) + for key, value in sorted(fn(ob).items()) + ) + res[hashable_dict].append(ob) + except (TypeError, AttributeError): + res[tuple(fn(ob))].append(ob) + return res + + @staticmethod + def get_chunks(_iter, n: int = 0, fn=None): + """ + Divides an iterable into chunks of specified size or based on a given function. + Useful for batching + + Parameters: + - iter: The input iterable to be divided into chunks. + - n: An integer representing the size of each chunk. Default is 0. + - fn: A function that takes the current index and the iterable as arguments and returns the size of the chunk. Default is None. + + Returns: + An iterator that yields chunks of the input iterable. + + Example usage: + ``` + data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for chunk in chunks(data, 3): + print(chunk) + ``` + Output: + ``` + [1, 2, 3] + [4, 5, 6] + [7, 8, 9] + [10] + ``` + """ + arr = [] + _iter = tuple(_iter) + for i, x in enumerate(_iter): + arr.append(x) + if len(arr) == (fn(i, _iter) if fn else n): + yield arr + arr = [] + + if arr: + yield arr diff --git a/lm-evaluation-harness/lm_eval/models/vllm_causallms.py b/lm-evaluation-harness/lm_eval/models/vllm_causallms.py new file mode 100644 index 0000000000000000000000000000000000000000..07d8b172618ccf87d3e600c64cf9c250319a048a --- /dev/null +++ b/lm-evaluation-harness/lm_eval/models/vllm_causallms.py @@ -0,0 +1,487 @@ +import copy +from importlib.metadata import version +from importlib.util import find_spec +from typing import List, Literal, Optional, Tuple, Union + +from more_itertools import distribute +from packaging.version import parse as parse_version +from tqdm import tqdm + +from lm_eval.api.instance import Instance +from lm_eval.api.model import TemplateLM +from lm_eval.api.registry import register_model +from lm_eval.models.utils import Collator, undistribute +from lm_eval.utils import ( + eval_logger, + get_rolling_token_windows, + make_disjoint_window, +) + + +try: + import ray + from vllm import LLM, SamplingParams + from vllm.transformers_utils.tokenizer import get_tokenizer +except ModuleNotFoundError: + pass + +eval_logger = eval_logger + + +@register_model("vllm") +class VLLM(TemplateLM): + _DEFAULT_MAX_LENGTH = 2048 + + def __init__( + self, + pretrained="gpt2", + dtype: Literal["float16", "bfloat16", "float32", "auto"] = "auto", + revision: Optional[str] = None, + trust_remote_code: Optional[bool] = False, + tokenizer: Optional[str] = None, + tokenizer_mode: Literal["auto", "slow"] = "auto", + tokenizer_revision: Optional[str] = None, + add_bos_token: Optional[bool] = False, + prefix_token_id: Optional[int] = None, + tensor_parallel_size: int = 1, + quantization: Optional[str] = None, + max_gen_toks: int = 256, + swap_space: int = 4, + batch_size: Union[str, int] = 1, + max_batch_size=None, + max_length: int = None, + max_model_len: int = None, + seed: int = 1234, + gpu_memory_utilization: float = 0.9, + device: str = "cuda", + data_parallel_size: int = 1, + **kwargs, + ): + super().__init__() + + if not find_spec("vllm"): + raise Exception( + "attempted to use 
'vllm' LM type, but package `vllm` is not installed. " + "Please install vllm via `pip install lm-eval[vllm]` or `pip install -e .[vllm]`" + ) + + assert "cuda" in device or device is None, "vLLM only supports CUDA" + assert ( + max_length is None or max_model_len is None + ), "Either max_length or max_model_len may be provided, but not both" + + self._max_length = max_model_len if max_model_len is not None else max_length + self.tensor_parallel_size = int(tensor_parallel_size) + self.data_parallel_size = int(data_parallel_size) + self.model_args = { + "model": pretrained, + "gpu_memory_utilization": float(gpu_memory_utilization), + "revision": revision, + "dtype": dtype, + "tokenizer": tokenizer, + "tokenizer_mode": tokenizer_mode, + "tokenizer_revision": tokenizer_revision, + "trust_remote_code": trust_remote_code, + "tensor_parallel_size": int(tensor_parallel_size), + "max_model_len": int(self._max_length) if self._max_length else None, + "swap_space": int(swap_space), + "quantization": quantization, + "seed": int(seed), + } + self.model_args.update(kwargs) + self.batch_size = ( + "auto" + if isinstance(batch_size, str) and "auto" in batch_size + else batch_size + ) + if self.data_parallel_size <= 1: + self.model = LLM(**self.model_args) + else: + assert parse_version(version("vllm")) < parse_version( + "0.3.3" + ), "data_parallel is only compatible with vllm < v0.3.3." + eval_logger.warning( + "You might experience occasional issues with model weight downloading when data_parallel is in use. To ensure stable performance, run with data_parallel_size=1 until the weights are downloaded and cached." + ) + self.model_args["worker_use_ray"] = True + self.batch_size = "auto" + eval_logger.info("Manual batching is not compatible with data parallelism.") + + from transformers import AutoConfig + + self._config = AutoConfig.from_pretrained( + pretrained, trust_remote_code=trust_remote_code, revision=revision + ) + self.tokenizer = get_tokenizer( + tokenizer if tokenizer else pretrained, + tokenizer_mode=tokenizer_mode, + trust_remote_code=trust_remote_code, + tokenizer_revision=tokenizer_revision, + ) + self.add_bos_token = add_bos_token + self.custom_prefix_token_id = prefix_token_id + if prefix_token_id is not None: + eval_logger.info( + f"Loglikelihood prefix token id used in evaluation: {self.prefix_token_id}" + ) + + self._max_gen_toks = max_gen_toks + + @property + def eot_token_id(self): + # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence* + return self.tokenizer.eos_token_id + + @property + def prefix_token_id(self): + # it is used as prefix for loglikelihood + if self.custom_prefix_token_id is not None: + return self.custom_prefix_token_id + if self.tokenizer.bos_token_id is not None: + return self.tokenizer.bos_token_id + return self.tokenizer.eos_token_id + + @property + def max_length(self): + if self._max_length: # if max length manually set, return it + return self._max_length + if self.data_parallel_size <= 1: + return self.model.llm_engine.model_config.max_model_len + else: + seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx") + for attr in seqlen_config_attrs: + if hasattr(self._config, attr): + return getattr(self._config, attr) + if hasattr(self.tokenizer, "model_max_length"): + if self.tokenizer.model_max_length == 1000000000000000019884624838656: + return self._DEFAULT_MAX_LENGTH + return self.tokenizer.model_max_length + return self._DEFAULT_MAX_LENGTH + + @property + def max_gen_toks(self): + return 
self._max_gen_toks + + def tok_encode( + self, + string: str, + left_truncate_len=None, + add_special_tokens=None, + truncation=False, + ): + """ """ + if not add_special_tokens: + add_special_tokens = False or self.add_bos_token + encoding = self.tokenizer.encode( + string, add_special_tokens=add_special_tokens, truncation=truncation + ) + + # left-truncate the encoded context to be at most `left_truncate_len` tokens long + if left_truncate_len: + encoding = encoding[-left_truncate_len:] + + return encoding + + def _model_generate( + self, + requests: List[List[int]] = None, + generate: bool = False, + max_tokens: int = None, + stop: Optional[List[str]] = None, + **kwargs, + ): + if generate: + kwargs = self.modify_gen_kwargs(kwargs) + sampling_params = SamplingParams(max_tokens=max_tokens, stop=stop, **kwargs) + else: + sampling_params = SamplingParams( + temperature=0, prompt_logprobs=1, max_tokens=1 + ) + if self.data_parallel_size > 1: + # vLLM hangs if tensor_parallel > 1 and resources are set in ray.remote + # also seems to only work with decorator and not with ray.remote() fn + # see https://github.com/vllm-project/vllm/issues/973 + # note: this has changed on 0.3.3, and it only works now if num_gpus are set. + # but then tensor_parallel breaks + @ray.remote + def run_inference_one_model( + model_args: dict, sampling_params, requests: List[List[int]] + ): + llm = LLM(**model_args) + return llm.generate( + prompt_token_ids=requests, sampling_params=sampling_params + ) + + # dispatch requests to all self.data_parallel_size workers, in interleaved fashion + # interleaved important to balance context lengths across workers + requests = [list(x) for x in distribute(self.data_parallel_size, requests)] + inputs = ((self.model_args, sampling_params, req) for req in requests) + object_refs = [run_inference_one_model.remote(*x) for x in inputs] + results = ray.get(object_refs) + # Invoke ray.shutdown() to prevent hang-ups if subsequent calls required. 
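+            # (The @ray.remote tasks above initialize Ray implicitly on first use,
+            # so the shutdown below releases those workers before any later calls.)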
+ ray.shutdown() + # flatten results + return undistribute(results) + + outputs = self.model.generate( + prompt_token_ids=requests, + sampling_params=sampling_params, + use_tqdm=True if self.batch_size == "auto" else False, + ) + return outputs + + def loglikelihood_rolling( + self, requests: List[Instance], disable_tqdm: bool = False + ) -> List[float]: + loglikelihoods = [] + + for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm): + rolling_token_windows = list( + map( + make_disjoint_window, + get_rolling_token_windows( + token_list=self.tok_encode(string), + prefix_token=self.eot_token_id, + max_seq_len=self.max_length - 1, + context_len=1, + ), + ) + ) + + rolling_token_windows = [(None,) + x for x in rolling_token_windows] + + string_nll = self._loglikelihood_tokens( + rolling_token_windows, + ) + + # discard is_greedy + string_nll = [x[0] for x in string_nll] + + string_nll = sum(string_nll) + loglikelihoods.append(string_nll) + return loglikelihoods + + def generate_until( + self, requests: List[Instance], disable_tqdm: bool = False + ) -> List[str]: + res = [] + + # batch tokenize contexts + context, all_gen_kwargs = zip(*(req.args for req in requests)) + context_encoding = self.tokenizer(context, add_special_tokens=False).input_ids + requests = [ + ((a, b), c) for a, b, c in zip(context, context_encoding, all_gen_kwargs) + ] + + def _collate_gen(_requests): + # the negative sign on len(toks) sorts descending - this has a few advantages: + # - time estimates will always be over not underestimates, which is more useful for planning + # - to know the size of a batch when going through the list, you know the first one is always the batch + # padded context length. this is useful to simplify the batching logic and more importantly to make + # automatic adaptive batches much much easier to implement + # - any OOMs will happen right away rather than near the end + return -len(_requests[0][1]), _requests[0][0] + + # we group requests by their generation_kwargs, + # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling + # in the same batch. + re_ords = Collator(requests, _collate_gen, group_by="gen_kwargs") + chunks = re_ords.get_batched( + n=int(self.batch_size) if self.batch_size != "auto" else 0, batch_fn=None + ) + + pbar = tqdm( + total=len(requests), + disable=(disable_tqdm or (self.rank != 0)), + desc="Running generate_until requests", + ) + # for each different set of kwargs, we execute all requests, by batch. + for chunk in chunks: + context_and_encoding, all_gen_kwargs = zip(*chunk) + context, context_encoding = zip(*context_and_encoding) + # we assume all gen kwargs in the batch are the same + # this is safe to assume because the `grouper` object ensures it. + gen_kwargs = all_gen_kwargs[0] + # unpack our keyword arguments. 
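+            # For illustration, a gen_kwargs dict at this point might look like
+            # (hypothetical values):
+            #   {"until": ["\n\n", "Q:"], "do_sample": False, "max_gen_toks": 32}
+            # "until" and "max_gen_toks" are consumed below; the remaining keys are
+            # forwarded to _model_generate, where modify_gen_kwargs maps HF-style
+            # flags such as do_sample onto vLLM SamplingParams arguments.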
+ until = None + if isinstance(gen_kwargs, dict): + kwargs = copy.deepcopy(gen_kwargs) # edge case for repeats > 1 + if "until" in kwargs.keys(): + until = kwargs.pop("until") + if isinstance(until, str): + until = [until] + elif not isinstance(until, list): + raise ValueError( + f"Expected `kwargs['until']` to be of type Union[str,list] but got {until}" + ) + else: + raise ValueError( + f"Expected `kwargs` to be of type `dict` but got {gen_kwargs}" + ) + # add EOS token to stop sequences + eos = self.tokenizer.decode(self.eot_token_id) + if not until: + until = [eos] + else: + until.append(eos) + if "max_gen_toks" in kwargs.keys(): + max_gen_toks = kwargs.pop("max_gen_toks") + else: + max_gen_toks = self.max_gen_toks + + # set the max length in tokens of inputs ("context_enc") + # max len for inputs = max length, minus room to generate the max new tokens + max_ctx_len = self.max_length - max_gen_toks + context_encoding = [x[-max_ctx_len:] for x in context_encoding] + + # perform batched generation + cont = self._model_generate( + requests=context_encoding, + generate=True, + max_tokens=max_gen_toks, + stop=until, + **kwargs, + ) + + # cache generations + for output, context in zip(cont, context): + generated_text = output.outputs[0].text + res.append(generated_text) + self.cache_hook.add_partial( + "generate_until", (context, gen_kwargs), generated_text + ) + pbar.update(1) + + pbar.close() + # reorder all group of results back to original unsorted form + return re_ords.get_original(res) + + def _loglikelihood_tokens( + self, + requests: List[Tuple[Tuple[str, str], List[int], List[int]]], + disable_tqdm: bool = False, + ) -> List[Tuple[float, bool]]: + res = [] + + def _collate(x): + toks = x[1] + x[2] + return -len(toks), tuple(toks) + + # Reorder requests by length and batch + re_ord = Collator(requests, sort_fn=_collate) + chunks = re_ord.get_batched( + n=int(self.batch_size) if self.batch_size != "auto" else 0, batch_fn=None + ) + + pbar = tqdm( + total=len(requests), + disable=disable_tqdm, + desc="Running loglikelihood requests", + ) + for chunk in chunks: + inputs = [] + ctxlens = [] + for cache_key, context_enc, continuation_enc in chunk: + inp = (context_enc + continuation_enc)[-(self.max_length) :] + ctxlen = len(context_enc) - max( + 0, len(context_enc) + len(continuation_enc) - (self.max_length) + ) + + inputs.append(inp) + ctxlens.append(ctxlen) + + outputs = self._model_generate(requests=inputs, generate=False) + + for output, ctxlen, (cache_key, _, _), inp in zip( + outputs, ctxlens, chunk, inputs + ): + answer = self._parse_logprobs( + tokens=inp, + outputs=output, + ctxlen=ctxlen, + ) + + res.append(answer) + + # partial caching + if cache_key is not None: + self.cache_hook.add_partial("loglikelihood", cache_key, answer) + pbar.update(1) + pbar.close() + return re_ord.get_original(res) + + @staticmethod + def _parse_logprobs(tokens: List, outputs, ctxlen: int) -> Tuple[float, bool]: + """Process logprobs and tokens. + + :param tokens: list + Input tokens (potentially left-truncated) + :param outputs: RequestOutput + Contains prompt_logprobs + :param ctxlen: int + Length of context (so we can slice them away and only keep the predictions) + :return: + continuation_logprobs: float + Log probabilities of continuation tokens + is_greedy: bool + Whether argmax matches given continuation exactly + """ + + # The first entry of prompt_logprobs is None because the model has no previous tokens to condition on. 
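+        # Rough shape (illustrative): for prompt token ids [t0, t1, t2],
+        # outputs.prompt_logprobs resembles
+        #   [None, {t1: lp(t1), other_id: lp(other), ...}, {t2: lp(t2), ...}]
+        # i.e. one dict per position, mapping candidate token ids to log-probabilities
+        # (or to Logprob objects on newer vLLM versions, handled below).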
+ continuation_logprobs_dicts = outputs.prompt_logprobs + + def coerce_logprob_to_num(logprob): + # vLLM changed the return type of logprobs from float + # to a Logprob object storing the float value + extra data + # (https://github.com/vllm-project/vllm/pull/3065). + # If we are dealing with vllm's Logprob object, return + # the logprob value stored as an attribute. Otherwise, + # return the object itself (which should be a float + # for older versions of vLLM). + return getattr(logprob, "logprob", logprob) + + continuation_logprobs_dicts = [ + { + token: coerce_logprob_to_num(logprob) + for token, logprob in logprob_dict.items() + } + if logprob_dict is not None + else None + for logprob_dict in continuation_logprobs_dicts + ] + + # Calculate continuation_logprobs + # assume ctxlen always >= 1 + continuation_logprobs = sum( + logprob_dict.get(token) + for token, logprob_dict in zip( + tokens[ctxlen:], continuation_logprobs_dicts[ctxlen:] + ) + ) + + # Determine if is_greedy + is_greedy = True + for token, logprob_dict in zip( + tokens[ctxlen:], continuation_logprobs_dicts[ctxlen:] + ): + # Get the token with the maximum log probability from the logprob_dict + if logprob_dict: # Ensure the logprob_dict is not None + top_token = max(logprob_dict, key=logprob_dict.get) + if top_token != token: + is_greedy = False + break + + return continuation_logprobs, is_greedy + + @staticmethod + def modify_gen_kwargs(kwargs: dict) -> dict: + # sampling_params + do_sample = kwargs.pop("do_sample", None) + if do_sample is False or "temperature" not in kwargs: + kwargs["temperature"] = 0.0 + # hf defaults + kwargs["skip_special_tokens"] = kwargs.get("skip_special_tokens", False) + kwargs["spaces_between_special_tokens"] = kwargs.get( + "spaces_between_special_tokens", False + ) + return kwargs diff --git a/lm-evaluation-harness/lm_eval/prompts/__init__.py b/lm-evaluation-harness/lm_eval/prompts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1f814214de4afaabd1367854c74dc2143c346744 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/prompts/__init__.py @@ -0,0 +1,126 @@ +import ast +import os +from typing import Dict + +from lm_eval import utils +from lm_eval.utils import eval_logger + + +# Prompt library. +# Stores prompts in a dictionary indexed by 2 levels: +# prompt category name, and prompt name. 
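+# For example, the prompt id "qa-basic:question-newline-answer" resolves to
+# PROMPT_REGISTRY["qa-basic"]["question-newline-answer"] via get_prompt() below.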
+# This allows us to access prompts +PROMPT_REGISTRY: Dict[str, Dict[str, str]] = { + "qa-basic": { + "question-newline-answer": "Question: {{question}}\nAnswer:", + "q-newline-a": "Q: {{question}}\nA:", + }, +} + + +def get_prompt(prompt_id: str, dataset_name: str = None, subset_name: str = None): + # unpack prompt name + category_name, prompt_name = prompt_id.split(":") + if subset_name is None: + dataset_full_name = dataset_name + else: + dataset_full_name = f"{dataset_name}-{subset_name}" + eval_logger.info(f"Loading prompt from {category_name} for {dataset_full_name}") + if category_name == "promptsource": + try: + from promptsource.templates import DatasetTemplates + except ModuleNotFoundError: + raise Exception( + "Tried to load a Promptsource template, but promptsource is not installed ", + "please install promptsource via pip install lm-eval[promptsource] or pip install -e .[promptsource]", + ) + try: + if subset_name is None: + prompts = DatasetTemplates(dataset_name=dataset_name) + else: + prompts = DatasetTemplates( + dataset_name=dataset_name, subset_name=subset_name + ) + except Exception: + raise ValueError(f"{dataset_name} and {subset_name} not found") + if prompt_name in prompts.all_template_names: + return prompts[prompt_name] + else: + raise ValueError( + f"{prompt_name} not in prompt list {prompts.all_template_names}" + ) + elif ".yaml" in category_name: + import yaml + + with open(category_name, "rb") as file: + prompt_yaml_file = yaml.full_load(file) + + prompt_string = prompt_yaml_file["prompts"][prompt_name] + return PromptString(prompt_string) + else: + try: + return PROMPT_REGISTRY[category_name][prompt_name] + except Exception: + raise ValueError( + f"expected only a single `:` as separator between \ + prompt category and name, but got `{prompt_id}` instead" + ) + + +def load_prompt_list( + use_prompt: str, dataset_name=None, subset_name=None, yaml_path=None, **kwargs +): + category_name, prompt_name = use_prompt.split(":") + + if category_name == "promptsource": + from promptsource.templates import DatasetTemplates + + if subset_name is None: + prompts = DatasetTemplates(dataset_name=dataset_name) + else: + prompts = DatasetTemplates( + dataset_name=dataset_name, subset_name=subset_name + ) + + prompt_list = utils.pattern_match(prompt_name, prompts.all_template_names) + + elif ".yaml" in category_name: + import yaml + + if yaml_path is not None: + category_name = os.path.realpath(os.path.join(yaml_path, category_name)) + + with open(category_name, "rb") as file: + prompt_yaml_file = yaml.full_load(file) + + prompt_list = utils.pattern_match( + prompt_name, prompt_yaml_file["prompts"].keys() + ) + + # category_name, *prompt_name = use_prompt.split(":") + # TODO allow to multiple prompt naming + # if len(prompt_name) > 1: + # prompt_list = [] + # for prompt in prompt_name: + # prompt_list.append(utils.pattern_match(prompt_name, prompts.all_template_names)) + # else: + # prompt_list = utils.pattern_match(prompt_name, prompts.all_template_names) + return [":".join([category_name, prompt]) for prompt in prompt_list] + + +class PromptString: + def __init__(self, prompt_string): + self.prompt_string = prompt_string + + def apply(self, doc): + doc_to_text = self.prompt_string["doc_to_text"] + doc_to_target = self.prompt_string["doc_to_target"] + + # TODO need a way to process doc_to_choice + if "doc_to_choice" in self.prompt_string: + raise Exception("Not yet implemented to accept doc_to_choice") + + text_string = utils.apply_template(doc_to_text, doc) + target_string = 
utils.apply_template(doc_to_target, doc) + + return [text_string, target_string] diff --git a/lm-evaluation-harness/lm_eval/prompts/__pycache__/__init__.cpython-310.pyc b/lm-evaluation-harness/lm_eval/prompts/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13c10bd322bbd4b1b54cb8a624eec31d20121e8b Binary files /dev/null and b/lm-evaluation-harness/lm_eval/prompts/__pycache__/__init__.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/README.md b/lm-evaluation-harness/lm_eval/tasks/agieval/README.md new file mode 100644 index 0000000000000000000000000000000000000000..faaf47b6beab877c7ee341a8dc2fc3e14a04b021 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/agieval/README.md @@ -0,0 +1,114 @@ +# AGIEval + +### Paper + +Title: AGIEval: A Human-Centric Benchmark for Evaluating Foundation Models + +Abstract: https://arxiv.org/abs/2304.06364.pdf + +AGIEval is a human-centric benchmark specifically designed to evaluate the general abilities of foundation models in tasks pertinent to human cognition and problem-solving. +This benchmark is derived from 20 official, public, and high-standard admission and qualification exams intended for general human test-takers, such as general college admission tests (e.g., Chinese College Entrance Exam (Gaokao) and American SAT), law school admission tests, math competitions, lawyer qualification tests, and national civil service exams. + +Homepage: https://github.com/ruixiangcui/AGIEval + +### Citation + +``` +@misc{zhong2023agieval, + title={AGIEval: A Human-Centric Benchmark for Evaluating Foundation Models}, + author={Wanjun Zhong and Ruixiang Cui and Yiduo Guo and Yaobo Liang and Shuai Lu and Yanlin Wang and Amin Saied and Weizhu Chen and Nan Duan}, + year={2023}, + eprint={2304.06364}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +Please make sure to cite all the individual datasets in your paper when you use them. We provide the relevant citation information below: + +``` +@inproceedings{ling-etal-2017-program, + title = "Program Induction by Rationale Generation: Learning to Solve and Explain Algebraic Word Problems", + author = "Ling, Wang and + Yogatama, Dani and + Dyer, Chris and + Blunsom, Phil", + booktitle = "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)", + month = jul, + year = "2017", + address = "Vancouver, Canada", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/P17-1015", + doi = "10.18653/v1/P17-1015", + pages = "158--167", + abstract = "Solving algebraic word problems requires executing a series of arithmetic operations{---}a program{---}to obtain a final answer. However, since programs can be arbitrarily complicated, inducing them directly from question-answer pairs is a formidable challenge. To make this task more feasible, we solve these problems by generating answer rationales, sequences of natural language and human-readable mathematical expressions that derive the final answer through a series of small steps. Although rationales do not explicitly specify programs, they provide a scaffolding for their structure via intermediate milestones. To evaluate our approach, we have created a new 100,000-sample dataset of questions, answers and rationales. 
Experimental results show that indirect supervision of program learning via answer rationales is a promising strategy for inducing arithmetic programs.", +} + +@inproceedings{hendrycksmath2021, + title={Measuring Mathematical Problem Solving With the MATH Dataset}, + author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt}, + journal={NeurIPS}, + year={2021} +} + +@inproceedings{Liu2020LogiQAAC, + title={LogiQA: A Challenge Dataset for Machine Reading Comprehension with Logical Reasoning}, + author={Jian Liu and Leyang Cui and Hanmeng Liu and Dandan Huang and Yile Wang and Yue Zhang}, + booktitle={International Joint Conference on Artificial Intelligence}, + year={2020} +} + +@inproceedings{zhong2019jec, + title={JEC-QA: A Legal-Domain Question Answering Dataset}, + author={Zhong, Haoxi and Xiao, Chaojun and Tu, Cunchao and Zhang, Tianyang and Liu, Zhiyuan and Sun, Maosong}, + booktitle={Proceedings of AAAI}, + year={2020}, +} + +@article{Wang2021FromLT, + title={From LSAT: The Progress and Challenges of Complex Reasoning}, + author={Siyuan Wang and Zhongkun Liu and Wanjun Zhong and Ming Zhou and Zhongyu Wei and Zhumin Chen and Nan Duan}, + journal={IEEE/ACM Transactions on Audio, Speech, and Language Processing}, + year={2021}, + volume={30}, + pages={2201-2216} +} +``` + +### Groups and Tasks + +#### Groups + +- `agieval`: Evaluates all tasks listed below. + +- `agieval_en`: Evaluates all English subtasks: `agieval_aqua_rat`, `agieval_gaokao_english`, `agieval_logiqa_en`, `agieval_lsat_*`, `agieval_sat_*`, `agieval_math` + +- `agieval_cn`: Evaluates all Chinese subtasks: +`agieval_gaokao_biology`, `agieval_gaokao_chemistry`, `agieval_gaokao_chinese`, `agieval_gaokao_geography`, +`agieval_gaokao_history`, `agieval_gaokao_mathqa`, `agieval_gaokao_mathcloze`, `agieval_gaokao_physics`, `agieval_jec_qa_ca`, `agieval_jec_qa_kd`, `agieval_logiqa_zh` + +- `agieval_nous`: Evaluates a specific subset of AGIEval tasks (multiple-choice and english-only), namely those in https://github.com/teknium1/LLM-Benchmark-Logs/blob/main/benchmark-logs/Mistral-7B-Base.md + +#### Tasks + +- `agieval_aqua_rat` +- `agieval_gaokao_biology` +- `agieval_gaokao_chemistry` +- `agieval_gaokao_chinese` +- `agieval_gaokao_english` +- `agieval_gaokao_geography` +- `agieval_gaokao_history` +- `agieval_gaokao_mathqa` +- `agieval_gaokao_mathcloze` +- `agieval_gaokao_physics` +- `agieval_jec_qa_ca` +- `agieval_jec_qa_kd` +- `agieval_logiqa_en` +- `agieval_logiqa_zh` +- `agieval_lsat_ar` +- `agieval_lsat_lr` +- `agieval_lsat_rc` +- `agieval_sat_en` +- `agieval_sat_en_without_passage` +- `agieval_sat_math` +- `agieval_math` diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-geography.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-geography.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2fe43bfd2cb620328dfb28ba4a4e9e6d6d093c07 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-geography.yaml @@ -0,0 +1,6 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_cn +task: agieval_gaokao_geography +dataset_path: hails/agieval-gaokao-geography diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-mathcloze.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-mathcloze.yaml new file mode 100644 index 0000000000000000000000000000000000000000..74cbad1c0325c4fb9fe78df83304741553c06134 --- /dev/null +++ 
b/lm-evaluation-harness/lm_eval/tasks/agieval/gaokao-mathcloze.yaml @@ -0,0 +1,25 @@ +group: + - agieval + - agieval_cn +task: agieval_gaokao_mathcloze +dataset_path: hails/agieval-gaokao-mathcloze +dataset_name: null +output_type: generate_until +training_split: null +validation_split: null +test_split: test +doc_to_text: "{{query}}" +doc_to_target: "{{answer}}" +process_results: !function utils.process_results +generation_kwargs: + max_gen_toks: 32 + do_sample: False + temperature: 0.0 + until: + - "Q:" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/logiqa-zh.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/logiqa-zh.yaml new file mode 100644 index 0000000000000000000000000000000000000000..82e688006b8272e015a74b01412ad35cfe33561e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/agieval/logiqa-zh.yaml @@ -0,0 +1,6 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_cn +task: agieval_logiqa_zh +dataset_path: hails/agieval-logiqa-zh diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/lsat-lr.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/lsat-lr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..62158e5cec196c0c7887a7236e1020ba2946da26 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/agieval/lsat-lr.yaml @@ -0,0 +1,7 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_nous + - agieval_en +task: agieval_lsat_lr +dataset_path: hails/agieval-lsat-lr diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/math.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/math.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c8ec9eec608c4eaced456c36dcb5dc9047ccd84e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/agieval/math.yaml @@ -0,0 +1,25 @@ +group: + - agieval + - agieval_en +task: agieval_math +dataset_path: hails/agieval-math +dataset_name: null +output_type: generate_until +training_split: null +validation_split: null +test_split: test +doc_to_text: "{{query}}" +doc_to_target: "{{answer}}" +process_results: !function utils.process_results +generation_kwargs: + max_gen_toks: 32 + do_sample: False + temperature: 0.0 + until: + - "Q:" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/agieval/sat-en.yaml b/lm-evaluation-harness/lm_eval/tasks/agieval/sat-en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a45dba1507a562ace2f56f9a0096ff25f767f1e6 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/agieval/sat-en.yaml @@ -0,0 +1,7 @@ +include: aqua-rat.yaml +group: + - agieval + - agieval_nous + - agieval_en +task: agieval_sat_en +dataset_path: hails/agieval-sat-en diff --git a/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/bleu.py b/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/bleu.py new file mode 100644 index 0000000000000000000000000000000000000000..654a0ae06aee49a9dd39b34648efc41ddef7d848 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/bleu.py @@ -0,0 +1,241 @@ +#!/usr/bin/python +import math +import re +import sys +import xml.sax.saxutils +from typing import Any, Dict, List, Optional, Pattern, Tuple, Union + + +""" +This script was adapted from the original version by hieuhoang1972 which is part of MOSES. 
+""" + +# $Id: bleu.py 1307 2007-03-14 22:22:36Z hieuhoang1972 $ + +"""Provides: + +cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test(). +cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked(). +score_cooked(alltest, n=4): Score a list of cooked test sentences. + +score_set(s, testid, refids, n=4): Interface with dataset.py; calculate BLEU score of testid against refids. + +The reason for breaking the BLEU computation into three phases cook_refs(), cook_test(), and score_cooked() is to allow the caller to calculate BLEU scores for multiple test sets as efficiently as possible. +""" + +# Added to bypass NIST-style pre-processing of hyp and ref files -- wade +nonorm = 0 + +preserve_case = False +eff_ref_len = "shortest" + +normalize1: List[Tuple[Union[Pattern[str], str], str]] = [ + ("", ""), # strip "skipped" tags + (r"-\n", ""), # strip end-of-line hyphenation and join lines + (r"\n", " "), # join lines + # (r'(\d)\s+(?=\d)', r'\1'), # join digits +] +normalize1 = [(re.compile(pattern), replace) for (pattern, replace) in normalize1] + +normalize2: List[Tuple[Union[Pattern[str], str], str]] = [ + ( + r"([\{-\~\[-\` -\&\(-\+\:-\@\/])", + r" \1 ", + ), # tokenize punctuation. apostrophe is missing + ( + r"([^0-9])([\.,])", + r"\1 \2 ", + ), # tokenize period and comma unless preceded by a digit + ( + r"([\.,])([^0-9])", + r" \1 \2", + ), # tokenize period and comma unless followed by a digit + (r"([0-9])(-)", r"\1 \2 "), # tokenize dash when preceded by a digit +] +normalize2 = [(re.compile(pattern), replace) for (pattern, replace) in normalize2] + + +def normalize(s): + """Normalize and tokenize text. This is lifted from NIST mteval-v11a.pl.""" + # Added to bypass NIST-style pre-processing of hyp and ref files -- wade + if nonorm: + return s.split() + if not isinstance(s, str): + s = " ".join(s) + # language-independent part: + for pattern, replace in normalize1: + s = re.sub(pattern, replace, s) + s = xml.sax.saxutils.unescape(s, {""": '"'}) + # language-dependent part (assuming Western languages): + s = " %s " % s + if not preserve_case: + s = s.lower() # this might not be identical to the original + for pattern, replace in normalize2: + s = re.sub(pattern, replace, s) + return s.split() + + +def count_ngrams(words, n=4): + counts: Dict[Any, int] = {} + for k in range(1, n + 1): + for i in range(len(words) - k + 1): + ngram = tuple(words[i : i + k]) + counts[ngram] = counts.get(ngram, 0) + 1 + return counts + + +def cook_refs(refs, n=4): + """Takes a list of reference sentences for a single segment + and returns an object that encapsulates everything that BLEU + needs to know about them.""" + + refs = [normalize(ref) for ref in refs] + maxcounts: Dict[Tuple[str], int] = {} + for ref in refs: + counts = count_ngrams(ref, n) + for ngram, count in counts.items(): + maxcounts[ngram] = max(maxcounts.get(ngram, 0), count) + return ([len(ref) for ref in refs], maxcounts) + + +def cook_test(test, item, n=4): + """Takes a test sentence and returns an object that + encapsulates everything that BLEU needs to know about it.""" + (reflens, refmaxcounts) = item + test = normalize(test) + result: Dict[str, Any] = {} + result["testlen"] = len(test) + + # Calculate effective reference sentence length. 
+ + if eff_ref_len == "shortest": + result["reflen"] = min(reflens) + elif eff_ref_len == "average": + result["reflen"] = float(sum(reflens)) / len(reflens) + elif eff_ref_len == "closest": + min_diff: Optional[int] = None + for reflen in reflens: + if min_diff is None or abs(reflen - len(test)) < min_diff: + min_diff = abs(reflen - len(test)) + result["reflen"] = reflen + + result["guess"] = [max(len(test) - k + 1, 0) for k in range(1, n + 1)] + + result["correct"] = [0] * n + counts = count_ngrams(test, n) + for ngram, count in counts.items(): + result["correct"][len(ngram) - 1] += min(refmaxcounts.get(ngram, 0), count) + + return result + + +def score_cooked(allcomps, n=4, ground=0, smooth=1): + totalcomps: Dict[str, Any] = { + "testlen": 0, + "reflen": 0, + "guess": [0] * n, + "correct": [0] * n, + } + for comps in allcomps: + for key in ["testlen", "reflen"]: + totalcomps[key] += comps[key] + for key in ["guess", "correct"]: + for k in range(n): + totalcomps[key][k] += comps[key][k] + logbleu = 0.0 + all_bleus: List[float] = [] + for k in range(n): + correct = totalcomps["correct"][k] + guess = totalcomps["guess"][k] + addsmooth = 0 + if smooth == 1 and k > 0: + addsmooth = 1 + logbleu += math.log(correct + addsmooth + sys.float_info.min) - math.log( + guess + addsmooth + sys.float_info.min + ) + if guess == 0: + all_bleus.append(-10000000.0) + else: + all_bleus.append(math.log(correct + sys.float_info.min) - math.log(guess)) + + logbleu /= float(n) + all_bleus.insert(0, logbleu) + + brevPenalty = min( + 0, 1 - float(totalcomps["reflen"] + 1) / (totalcomps["testlen"] + 1) + ) + for i in range(len(all_bleus)): + if i == 0: + all_bleus[i] += brevPenalty + all_bleus[i] = math.exp(all_bleus[i]) + return all_bleus + + +def bleu(refs, candidate, ground=0, smooth=1): + refs = cook_refs(refs) + test = cook_test(candidate, refs) + return score_cooked([test], ground=ground, smooth=smooth) + + +def splitPuncts(line): + return " ".join(re.findall(r"[\w]+|[^\s\w]", line)) + + +def computeMaps(predictions, goldfile): + predictionMap: Dict[str, list] = {} + goldMap: Dict[str, list] = {} + gf = open(goldfile, "r", encoding="utf-8") + + for row in predictions: + cols = row.strip().split("\t") + if len(cols) == 1: + (rid, pred) = (cols[0], "") + else: + (rid, pred) = (cols[0], cols[1]) + predictionMap[rid] = [splitPuncts(pred.strip().lower())] + + for row in gf: + (rid, pred) = row.split("\t") + if rid in predictionMap: # Only insert if the id exists for the method + if rid not in goldMap: + goldMap[rid] = [] + goldMap[rid].append(splitPuncts(pred.strip().lower())) + + sys.stderr.write("Total: " + str(len(goldMap)) + "\n") + return (goldMap, predictionMap) + + +# m1 is the reference map +# m2 is the prediction map +def bleuFromMaps(m1, m2): + score = [0] * 5 + num = 0.0 + + for key in m1: + if key in m2: + bl = bleu(m1[key], m2[key][0]) + score = [score[i] + bl[i] for i in range(0, len(bl))] + num += 1 + return [s * 100.0 / num for s in score] + + +def smoothed_bleu_4(references, predictions, **kwargs): + predictionMap = {} + goldMap = {} + + for rid, pred in enumerate(predictions): + predictionMap[rid] = [splitPuncts(pred.strip().lower())] + + for rid, row in enumerate(references): + goldMap[rid] = [splitPuncts(row.strip().lower())] + + return bleuFromMaps(goldMap, predictionMap)[0] + + +if __name__ == "__main__": + reference_file = sys.argv[1] + predictions = [] + for row in sys.stdin: + predictions.append(row) + (goldMap, predictionMap) = computeMaps(predictions, reference_file) + 
print(bleuFromMaps(goldMap, predictionMap)[0]) diff --git a/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/go.yaml b/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/go.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7b40edc96c4ac87e4889895829a754ea2d9aa0d3 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/go.yaml @@ -0,0 +1,21 @@ +group: + - codexglue_code2text +task: code2text_go +dataset_path: CM/codexglue_code2text_go +training_split: train +validation_split: validation +test_split: test +output_type: generate_until +generation_kwargs: + num_beams: 10 + max_gen_toks: 128 + until: + - "</s>" +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +metric_list: + - metric: !function bleu.smoothed_bleu_4 + aggregation: mean + higher_is_better: True +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/java.yaml b/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/java.yaml new file mode 100644 index 0000000000000000000000000000000000000000..65eb024d0fbc4a052558a938fb29db5058a5bb39 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/java.yaml @@ -0,0 +1,21 @@ +group: + - codexglue_code2text +task: code2text_java +dataset_path: CM/codexglue_code2text_java +training_split: train +validation_split: validation +test_split: test +output_type: generate_until +generation_kwargs: + num_beams: 10 + max_gen_toks: 128 + until: + - "</s>" +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +metric_list: + - metric: !function bleu.smoothed_bleu_4 + aggregation: mean + higher_is_better: True +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/javascript.yaml b/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/javascript.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c5b288192b0c88a7a9fda139422204448ebce8ca --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/javascript.yaml @@ -0,0 +1,21 @@ +group: + - codexglue_code2text +task: code2text_javascript +dataset_path: CM/codexglue_code2text_javascript +training_split: train +validation_split: validation +test_split: test +output_type: generate_until +generation_kwargs: + num_beams: 10 + max_gen_toks: 128 + until: + - "</s>" +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +metric_list: + - metric: !function bleu.smoothed_bleu_4 + aggregation: mean + higher_is_better: True +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/php.yaml b/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/php.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e368d7daacc98459b40a4bab6634299976a73c45 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/php.yaml @@ -0,0 +1,21 @@ +group: + - codexglue_code2text +task: code2text_php +dataset_path: CM/codexglue_code2text_php +training_split: train +validation_split: validation +test_split: test +output_type: generate_until +generation_kwargs: + num_beams: 10 + max_gen_toks: 128 + until: + - "</s>" +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +metric_list: + - metric: !function bleu.smoothed_bleu_4 + aggregation: mean + higher_is_better: True +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/python.yaml
b/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/python.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e8e2cb6ce4079165725883c9e3be6ed167631750 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/python.yaml @@ -0,0 +1,21 @@ +group: + - codexglue_code2text +task: code2text_python +dataset_path: CM/codexglue_code2text_python +training_split: train +validation_split: validation +test_split: test +output_type: generate_until +generation_kwargs: + num_beams: 10 + max_gen_toks: 128 + until: + - "</s>" +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +metric_list: + - metric: !function bleu.smoothed_bleu_4 + aggregation: mean + higher_is_better: True +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/ruby.yaml b/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/ruby.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a89134c626eda6af05399cc1ed931b7b089b5409 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/ruby.yaml @@ -0,0 +1,21 @@ +group: + - codexglue_code2text +task: code2text_ruby +dataset_path: CM/codexglue_code2text_ruby +training_split: train +validation_split: validation +test_split: test +output_type: generate_until +generation_kwargs: + num_beams: 10 + max_gen_toks: 128 + until: + - "</s>" +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +metric_list: + - metric: !function bleu.smoothed_bleu_4 + aggregation: mean + higher_is_better: True +metadata: + version: 3.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/utils.py b/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6975684259648ca5d6f71d28d65fef7ad73e0bae --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/code_x_glue/code-text/utils.py @@ -0,0 +1,12 @@ +def doc_to_text(doc): + inputs = " ".join(doc["code_tokens"]).replace("\n", " ") + inputs = " ".join(inputs.strip().split()) + + return inputs + + +def doc_to_target(doc): + targets = " ".join(doc["docstring_tokens"]).replace("\n", "") + targets = " ".join(targets.strip().split()) + + return targets diff --git a/lm-evaluation-harness/lm_eval/tasks/indiccopa/__pycache__/utils.cpython-310.pyc b/lm-evaluation-harness/lm_eval/tasks/indiccopa/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6753f6cce516dbf26530bc5395a943a2c4dc2bb Binary files /dev/null and b/lm-evaluation-harness/lm_eval/tasks/indiccopa/__pycache__/utils.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_hi.yaml b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_hi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4207e316a1a086d55a5aa465803cf0535cd912ec --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_hi.yaml @@ -0,0 +1,29 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness.
+group: ai4bharat/IndicCOPA +dataset_path: ai4bharat/IndicCOPA +dataset_name: translation-hi +output_type: multiple_choice +# training_split: train +# validation_split: validation +test_split: test +# doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice." +# doc_to_target: label +# doc_to_choice: "{{choice1}}{{choice2}}" +# metric_list: +# - metric: acc +# aggregation: mean +# higher_is_better: true +# metadata: +# version: 1.0 + +doc_to_text: !function utils.doc_to_text_hi +doc_to_target: label +doc_to_choice: !function utils.doc_to_choice +metric_list: + - metric: acc +metadata: + version: 1.0 + +task: indiccopa-hi diff --git a/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_or.yaml b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_or.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e8b8d647a7271aec10ac2b656892c8c56089ce54 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_or.yaml @@ -0,0 +1,33 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +group: ai4bharat/IndicCOPA +dataset_path: ai4bharat/IndicCOPA +dataset_name: translation-or +output_type: multiple_choice +# training_split: train +# validation_split: validation +test_split: test +# doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice." +# doc_to_target: label +# doc_to_choice: "{{choice1}}{{choice2}}" +# metric_list: +# - metric: acc +# aggregation: mean +# higher_is_better: true +# metadata: +# version: 1.0 + +doc_to_text: !function utils.doc_to_text_or +doc_to_target: label +doc_to_choice: !function utils.doc_to_choice +metric_list: + - metric: acc +metadata: + version: 1.0 + + +# doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+", +# सही? नहीं, "+hypothesis]}}' +# doc_to_text: '' +task: indiccopa-or diff --git a/venv/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so b/venv/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so new file mode 100644 index 0000000000000000000000000000000000000000..91cb5013e4bc0a14459ffc9485cd4f9b68ac050b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7632e06d78ea6d2e94cc85abd2132718558878974857efa75d081cd98457c289 +size 881666489
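
Usage note: the `bleu.py` module added above is the MOSES-derived smoothed BLEU implementation that the code-to-text configs reference via `!function bleu.smoothed_bleu_4`. Below is a minimal sketch of calling it directly; the reference/prediction strings are toy values chosen for illustration, not dataset rows, and importing `bleu` as a plain module assumes its directory is on `sys.path` (inside the harness the function is resolved through the YAML hook instead).

```python
# Minimal sketch: scoring one toy prediction with the smoothed BLEU-4 metric
# defined in lm_eval/tasks/code_x_glue/code-text/bleu.py.
from bleu import smoothed_bleu_4

references = ["Returns the sum of two integers ."]          # toy gold docstring
predictions = ["Return the sum of the two integer values"]  # toy model output

# smoothed_bleu_4 lower-cases both sides, re-tokenizes punctuation with
# splitPuncts, and reports the smoothed BLEU-4 score scaled to 0-100.
score = smoothed_bleu_4(references, predictions)
print(f"smoothed BLEU-4: {score:.2f}")
```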
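Similarly, the `utils.py` helpers used by the code-to-text YAMLs simply detokenize the dataset's `code_tokens` and `docstring_tokens` fields into whitespace-normalized strings. A sketch on a hypothetical document follows; the field values are made up for illustration, while real rows come from the `CM/codexglue_code2text_*` datasets named in the configs.

```python
# Sketch of the code_x_glue/code-text preprocessing on a hypothetical doc dict.
from utils import doc_to_text, doc_to_target  # lm_eval/tasks/code_x_glue/code-text/utils.py

doc = {
    "code_tokens": ["def", "add", "(", "a", ",", "b", ")", ":", "return", "a", "+", "b"],
    "docstring_tokens": ["Add", "two", "numbers", "."],
}

prompt = doc_to_text(doc)    # -> "def add ( a , b ) : return a + b"
target = doc_to_target(doc)  # -> "Add two numbers ."
print(prompt)
print(target)
```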
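Finally, a hedged sketch of how the task and group names registered by these configs (`agieval*`, `code2text_*`, `indiccopa-*`) might be evaluated through the harness's programmatic entry point. The model name and `pretrained=` argument are placeholders, and the exact keyword set of `simple_evaluate` should be checked against the installed harness version.

```python
# Hedged sketch: evaluating a few of the newly added tasks/groups in one run.
# "EleutherAI/pythia-160m" is only a placeholder checkpoint for illustration.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=["agieval_nous", "code2text_python", "indiccopa-hi"],
    batch_size=8,
)
print(results["results"])
```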