|
import copy |
|
import os |
|
from collections import defaultdict |
|
from importlib.util import find_spec |
|
from typing import List, Literal, Optional, Tuple |
|
|
|
from tqdm import tqdm |
|
|
|
import lm_eval.models.utils |
|
from lm_eval import utils |
|
from lm_eval.api.model import LM, TemplateLM |
|
from lm_eval.api.registry import register_model |
|
from lm_eval.models.utils import retry_on_specific_exceptions |
|
from lm_eval.utils import eval_logger |
|
|
|
|
|
def get_result(response, ctxlen: int) -> Tuple[float, bool]: |
|
"""Process results from OpenAI API response. |
|
|
|
:param response: dict |
|
OpenAI API Response |
|
:param ctxlen: int |
|
Length of context (so we can slice them away and only keep the predictions) |
|
:return: |
|
        continuation_logprobs: float
            Sum of the log probabilities of the continuation tokens
|
is_greedy: bool |
|
whether argmax matches given continuation exactly |
|
""" |
|
is_greedy = True |
|
logprobs = response.logprobs.token_logprobs |
|
continuation_logprobs = sum(logprobs[ctxlen:]) |
|
|
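    # check whether every continuation token matches the model's argmax (top-1) token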
|
for i in range(ctxlen, len(response.logprobs.token_logprobs)): |
|
        token = response.logprobs.tokens[i]
|
top_tokens = response.logprobs.top_logprobs[i] |
|
top_token = max(top_tokens.keys(), key=lambda x: top_tokens[x]) |
|
if top_token != token: |
|
is_greedy = False |
|
break |
|
|
|
return continuation_logprobs, is_greedy |
|
|
|
|
|
def oa_completion(client, chat: bool = False, **kwargs): |
|
"""Query OpenAI API for completion. |
|
|
|
Retry with back-off until they respond |
|
""" |
|
if not find_spec("openai") or not find_spec("tiktoken"): |
|
raise Exception( |
|
"attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. " |
|
"Please install these via `pip install lm-eval[openai]` or `pip install -e .[openai]`" |
|
) |
|
else: |
|
import openai |
|
|
|
def _exception_callback(e: Exception, sleep_time: float) -> None: |
|
import traceback |
|
|
|
traceback.print_exc() |
|
|
|
@retry_on_specific_exceptions( |
|
on_exceptions=[openai.OpenAIError], |
|
max_retries=None, |
|
on_exception_callback=_exception_callback, |
|
) |
|
def completion(): |
|
if chat: |
|
return client.chat.completions.create(**kwargs) |
|
else: |
|
return client.completions.create(**kwargs) |
|
|
|
return completion() |
|
|
|
|
|
@register_model("openai-completions", "local-completions") |
|
class OpenaiCompletionsLM(TemplateLM): |
|
_DEFAULT_MAX_LENGTH = 2048 |
|
|
|
def __init__( |
|
self, |
|
model: str, |
|
        base_url: Optional[str] = None,
|
tokenizer: Optional[str] = None, |
|
tokenizer_backend: Literal["tiktoken", "huggingface"] = "tiktoken", |
|
truncate: bool = False, |
|
max_gen_toks: int = 256, |
|
batch_size: int = 1, |
|
seed: int = 1234, |
|
max_length: Optional[int] = None, |
|
) -> None: |
|
""" |
|
|
|
        :param model: str
            OpenAI API model (e.g. gpt-3.5-turbo-instruct)
|
:param truncate: bool |
|
Truncate input if too long (if False and input is too long, throw error) |
|
""" |
|
super().__init__() |
|
self.seed = seed |
|
try: |
|
import openai |
|
import tiktoken |
|
except ModuleNotFoundError: |
|
raise Exception( |
|
"attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. \ |
|
please install these via `pip install lm-eval[openai]` or `pip install -e .\"[openai]\"`", |
|
) |
|
self.model = model |
|
self.base_url = base_url |
|
self.tokenizer_backend = tokenizer_backend |
|
self.truncate = truncate |
|
self._batch_size = int(batch_size) |
|
self._max_gen_toks = max_gen_toks |
|
self._max_length = max_length |
|
|
|
|
|
if self.tokenizer_backend == "huggingface": |
|
import transformers |
|
|
|
self.tokenizer = transformers.AutoTokenizer.from_pretrained( |
|
tokenizer if tokenizer else self.model |
|
) |
|
            self.vocab_size = self.tokenizer.vocab_size
            self.end_of_text_token_id = self.tokenizer.eos_token_id
|
elif self.tokenizer_backend == "tiktoken": |
|
if self.base_url: |
|
eval_logger.warning( |
|
f"Passed `base_url={self.base_url}` but using Tiktoken tokenizer backend. " |
|
"Pass `tokenizer_backend=huggingface` and provide the HF tokenizer name if your model does not use Tiktoken." |
|
) |
|
|
|
self.tokenizer = tiktoken.encoding_for_model(self.model) |
|
self.vocab_size = self.tokenizer.n_vocab |
|
self.end_of_text_token_id = self.tokenizer.eot_token |
|
else: |
|
raise ValueError( |
|
f"Expected tokenizer_backend to be one of ['tiktoken', 'huggingface'] but got {self.tokenizer_backend}" |
|
) |
|
|
|
|
|
|
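        # Read the API key from the OPENAI_API_KEY environment variable
        # (a local OpenAI-compatible server may accept a dummy value).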
|
openai.api_key = os.environ["OPENAI_API_KEY"] |
|
if self.base_url: |
|
self.client = openai.OpenAI(base_url=self.base_url) |
|
else: |
|
self.client = openai.OpenAI() |
|
|
|
@property |
|
def eot_token_id(self): |
|
return self.end_of_text_token_id |
|
|
|
@property |
|
def max_length(self) -> int: |
|
if self._max_length: |
|
return self._max_length |
|
else: |
|
return self._DEFAULT_MAX_LENGTH |
|
|
|
@property |
|
def max_gen_toks(self) -> int: |
|
return self._max_gen_toks |
|
|
|
@property |
|
def batch_size(self) -> int: |
|
return self._batch_size |
|
|
|
@property |
|
def device(self): |
|
|
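        # Not used: this API-backed LM overrides the request-level methods directly.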
|
raise NotImplementedError() |
|
|
|
def tok_encode(self, string: str, **kwargs) -> List[int]: |
|
return self.tokenizer.encode(string) |
|
|
|
def tok_decode(self, tokens: List[int]) -> str: |
|
return self.tokenizer.decode(tokens) |
|
|
|
def _loglikelihood_tokens( |
|
self, requests, disable_tqdm: bool = False |
|
) -> List[Tuple[float, bool]]: |
|
res = [] |
|
|
|
def _collate(x): |
|
|
|
|
|
|
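            # order requests by descending total token length (context + continuation);
            # the token tuple acts as a deterministic tiebreaker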
|
toks = x[1] + x[2] |
|
return -len(toks), tuple(toks) |
|
|
|
re_ord = utils.Reorderer(requests, _collate) |
|
|
|
for chunk in tqdm( |
|
list(lm_eval.models.utils.chunks(re_ord.get_reordered(), self.batch_size)), |
|
disable=disable_tqdm, |
|
): |
|
inps = [] |
|
ctxlens = [] |
|
for cache_key, context_enc, continuation_enc in chunk: |
|
|
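                # keep only the last max_length + 1 tokens; the first token serves purely
                # as context and never needs a logprob of its own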
|
inp = (context_enc + continuation_enc)[-(self.max_length + 1) :] |
|
|
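                # number of context tokens remaining after left-truncation, used later
                # to slice the continuation logprobs out of the response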
|
ctxlen = len(context_enc) - max( |
|
0, len(context_enc) + len(continuation_enc) - (self.max_length + 1) |
|
) |
|
|
|
inps.append(inp) |
|
ctxlens.append(ctxlen) |
|
|
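            # score without generating: echo=True with max_tokens=0 returns logprobs
            # for the prompt tokens themselves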
|
response = oa_completion( |
|
client=self.client, |
|
model=self.model, |
|
prompt=inps, |
|
echo=True, |
|
max_tokens=0, |
|
temperature=0.0, |
|
logprobs=10, |
|
seed=self.seed, |
|
) |
|
|
|
for resp, ctxlen, (cache_key, context_enc, continuation_enc) in zip( |
|
response.choices, ctxlens, chunk |
|
): |
|
answer = get_result(resp, ctxlen) |
|
|
|
res.append(answer) |
|
|
|
|
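                # partial caching: store each result as soon as it is computed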
|
if cache_key is not None: |
|
self.cache_hook.add_partial("loglikelihood", cache_key, answer) |
|
return re_ord.get_original(res) |
|
|
|
def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]: |
|
if not requests: |
|
return [] |
|
res = [] |
|
requests = [req.args for req in requests] |
|
|
|
def _collate(x): |
|
toks = self.tok_encode(x[0]) |
|
return len(toks), x[0] |
|
|
|
re_ord = utils.Reorderer(requests, _collate) |
|
|
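        # group consecutive requests that share identical generation arguments so each
        # group can be sent to the API as a single batched call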
|
def sameuntil_chunks(xs, size): |
|
ret = [] |
|
lastuntil = xs[0][1] |
|
for x in xs: |
|
if len(ret) >= size or x[1] != lastuntil: |
|
yield ret, lastuntil |
|
ret = [] |
|
lastuntil = x[1] |
|
ret.append(x) |
|
|
|
if ret: |
|
yield ret, lastuntil |
|
|
|
|
|
for chunk, request_args in tqdm( |
|
list(sameuntil_chunks(re_ord.get_reordered(), self.batch_size)), |
|
disable=disable_tqdm, |
|
): |
|
inps = [] |
|
self._max_gen_toks = request_args.get("max_gen_toks", self.max_gen_toks) |
|
for context, _ in chunk: |
|
context_enc = self.tok_encode(context) |
|
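                # leave room for generation: keep only the last (max_length - max_gen_toks) context tokens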
inp = context_enc[-(self.max_length - self.max_gen_toks) :] |
|
inps.append(inp) |
|
|
|
until = request_args.get("until", ["<|endoftext|>"]) |
|
request_args["temperature"] = request_args.get("temperature", 0) |
|
|
|
response = oa_completion( |
|
client=self.client, |
|
model=self.model, |
|
prompt=inps, |
|
max_tokens=self.max_gen_toks, |
|
stop=until, |
|
seed=self.seed, |
|
**{ |
|
k: v |
|
for k, v in request_args.items() |
|
if k not in {"do_sample", "max_gen_toks", "until"} |
|
}, |
|
) |
|
for resp, (context, args_) in zip(response.choices, chunk): |
|
                s = resp.text
|
|
|
until_ = until |
|
|
|
for term in until_: |
|
if len(term) > 0: |
|
s = s.split(term)[0] |
|
|
|
|
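                # partial caching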
|
self.cache_hook.add_partial( |
|
"generate_until", (context, {"until": until_}), s |
|
) |
|
|
|
res.append(s) |
|
return re_ord.get_original(res) |
|
|
|
def _model_call(self, inps): |
|
|
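        # Not used: API-based loglikelihoods are computed in _loglikelihood_tokens.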
|
raise NotImplementedError() |
|
|
|
def _model_generate(self, context, max_length, eos_token_id): |
|
|
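        # Not used: generation is handled directly in generate_until.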
|
raise NotImplementedError() |
|
|
|
def loglikelihood_rolling( |
|
self, requests, disable_tqdm: bool = False |
|
) -> List[float]: |
|
loglikelihoods = [] |
|
|
|
for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm): |
|
rolling_token_windows = list( |
|
map( |
|
utils.make_disjoint_window, |
|
utils.get_rolling_token_windows( |
|
token_list=self.tok_encode(string), |
|
prefix_token=self.eot_token_id, |
|
max_seq_len=self.max_length, |
|
context_len=1, |
|
), |
|
) |
|
) |
|
|
|
|
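            # prepend None as a dummy cache_key so the tuples match the input format of _loglikelihood_tokens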
|
rolling_token_windows = [(None,) + x for x in rolling_token_windows] |
|
|
|
string_nll = self._loglikelihood_tokens( |
|
rolling_token_windows, |
|
disable_tqdm=True, |
|
) |
|
|
|
|
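            # discard the is_greedy flags and keep only the log-likelihoods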
|
string_nll = [x[0] for x in string_nll] |
|
|
|
string_nll = sum(string_nll) |
|
loglikelihoods.append(string_nll) |
|
return loglikelihoods |
|
|
|
|
|
@register_model("openai-chat-completions", "local-chat-completions") |
|
class OpenaiChatCompletionsLM(LM): |
|
def __init__( |
|
self, |
|
model: str = "gpt-3.5-turbo", |
|
        base_url: Optional[str] = None,
|
truncate: bool = False, |
|
**kwargs, |
|
) -> None: |
|
""" |
|
|
|
:param model: str |
|
Implements an OpenAI-style chat completion API for |
|
accessing both OpenAI OR locally-hosted models using |
|
HuggingFace Tokenizer |
|
OpenAI API model (e.g. gpt-3.5-turbo) |
|
using the **gen_kwargs passed on init |
|
:param truncate: bool |
|
Truncate input if too long (if False and input is too long, throw error) |
|
""" |
|
super().__init__() |
|
try: |
|
import openai |
|
except ModuleNotFoundError: |
|
raise Exception( |
|
"attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. \ |
|
please install these via `pip install lm-eval[openai]` or `pip install -e .[openai]`", |
|
) |
|
self.model = model |
|
self.base_url = base_url |
|
self.truncate = truncate |
|
|
|
|
|
|
|
if self.base_url: |
|
self.client = openai.OpenAI(base_url=self.base_url) |
|
else: |
|
self.client = openai.OpenAI() |
|
|
|
@property |
|
def max_length(self) -> int: |
|
|
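        # conservative default; actual chat-model context windows vary by model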
|
return 2048 |
|
|
|
@property |
|
def max_gen_toks(self) -> int: |
|
return 256 |
|
|
|
@property |
|
def batch_size(self): |
|
|
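        # Not used: requests are dispatched one conversation at a time.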
|
raise NotImplementedError() |
|
|
|
@property |
|
def device(self): |
|
|
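        # Not used for API-backed models.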
|
raise NotImplementedError() |
|
|
|
def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]: |
|
res = defaultdict(list) |
|
re_ords = {} |
|
|
|
|
|
|
|
|
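        # group requests by their generation kwargs so that, e.g., greedy and sampled
        # requests are never mixed within the same batch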
|
grouper = lm_eval.models.utils.Grouper(requests, lambda x: str(x.args[1])) |
|
for key, reqs in grouper.get_grouped().items(): |
|
|
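            # within each group, reorder by descending context length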
|
re_ords[key] = utils.Reorderer( |
|
[req.args for req in reqs], lambda x: (-len(x[0]), x[0]) |
|
) |
|
|
|
pbar = tqdm(total=len(requests), disable=(disable_tqdm or (self.rank != 0))) |
|
for key, re_ord in re_ords.items(): |
|
|
|
|
|
|
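            # n=1 because each chat-completion request carries a single conversation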
|
chunks = lm_eval.models.utils.chunks(re_ord.get_reordered(), n=1) |
|
for chunk in chunks: |
|
contexts, all_gen_kwargs = zip(*chunk) |
|
inps = [{"role": "user", "content": context} for context in contexts] |
|
|
|
gen_kwargs = all_gen_kwargs[0] |
|
until = None |
|
if isinstance(kwargs := copy.deepcopy(gen_kwargs), dict): |
|
if "do_sample" in kwargs.keys(): |
|
kwargs.pop("do_sample") |
|
if "until" in kwargs.keys(): |
|
until = kwargs.pop("until") |
|
if isinstance(until, str): |
|
                            until = [until]
|
elif not isinstance(until, list): |
|
raise ValueError( |
|
f"Expected repr(kwargs['until']) to be of type Union[str, list] but got {until}" |
|
) |
|
kwargs["stop"] = until |
|
kwargs["max_tokens"] = kwargs.pop("max_gen_toks", self.max_gen_toks) |
|
else: |
|
raise ValueError( |
|
f"Expected repr(kwargs) to be of type repr(dict) but got {kwargs}" |
|
) |
|
|
|
response = oa_completion( |
|
client=self.client, |
|
chat=True, |
|
messages=inps, |
|
model=self.model, |
|
**kwargs, |
|
) |
|
|
|
for resp, (context, args_) in zip(response.choices, chunk): |
|
s = resp.message.content |
|
|
|
if until is not None: |
|
for term in until: |
|
if len(term) > 0: |
|
s = s.split(term)[0] |
|
|
|
res[key].append(s) |
|
|
|
self.cache_hook.add_partial( |
|
"generate_until", (context, {"until": until}), s |
|
) |
|
pbar.update(1) |
|
|
|
res[key] = re_ord.get_original(res[key]) |
|
|
|
pbar.close() |
|
|
|
return grouper.get_original(res) |
|
|
|
def loglikelihood(self, requests, disable_tqdm: bool = False): |
|
raise NotImplementedError("No support for logits.") |
|
|
|
def loglikelihood_rolling(self, requests, disable_tqdm: bool = False): |
|
raise NotImplementedError("No support for logits.") |
|
|