import dataclasses
from typing import Dict, Optional, Union

from lm_eval.tasks.ifeval import instructions_registry
from lm_eval.utils import eval_logger
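

# One IFEval record: the prompt, the ids of the verifiable instructions
# attached to it, and the per-instruction keyword arguments used to build
# each checker.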
@dataclasses.dataclass
class InputExample:
    key: int
    instruction_id_list: list[str]
    prompt: str
    kwargs: list[Dict[str, Optional[Union[str, int]]]]
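

# Result of checking one response: a per-instruction pass/fail list plus a
# single flag that is True only when every instruction was followed.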
@dataclasses.dataclass
class OutputExample:
    instruction_id_list: list[str]
    prompt: str
    response: str
    follow_all_instructions: bool
    follow_instruction_list: list[bool]


def test_instruction_following_strict(
    inp,
    response,
):
    """Tests response to see if instructions are followed."""
    instruction_list = inp.instruction_id_list
    is_following_list = []

    for index, instruction_id in enumerate(instruction_list):
        instruction_cls = instructions_registry.INSTRUCTION_DICT[instruction_id]
        instruction = instruction_cls(instruction_id)

        # Remove None values from kwargs to avoid unexpected keyword
        # argument errors in build_description method.
        kwargs = {k: v for k, v in inp.kwargs[index].items() if v}
        instruction.build_description(**kwargs)
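        # Some instructions (e.g. repeating the prompt) are parameterized by
        # the original prompt, so rebuild the description with it.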
        args = instruction.get_instruction_args()
        if args and "prompt" in args:
            instruction.build_description(prompt=inp.prompt)

        if response.strip() and instruction.check_following(response):
            is_following_list.append(True)
        else:
            is_following_list.append(False)

    return OutputExample(
        instruction_id_list=inp.instruction_id_list,
        prompt=inp.prompt,
        response=response,
        follow_all_instructions=all(is_following_list),
        follow_instruction_list=is_following_list,
    )


def test_instruction_following_loose(
    inp,
    response,
):
    """Tests response for an upper bound for following instructions."""
    r = response.split("\n")
    response_remove_first = "\n".join(r[1:]).strip()
    response_remove_last = "\n".join(r[:-1]).strip()
    response_remove_both = "\n".join(r[1:-1]).strip()
    revised_response = response.replace("*", "")
    revised_response_remove_first = response_remove_first.replace("*", "")
    revised_response_remove_last = response_remove_last.replace("*", "")
    revised_response_remove_both = response_remove_both.replace("*", "")
    all_responses = [
        response,
        revised_response,
        response_remove_first,
        response_remove_last,
        response_remove_both,
        revised_response_remove_first,
        revised_response_remove_last,
        revised_response_remove_both,
    ]
    instruction_list = inp.instruction_id_list
    is_following_list = []
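    # An instruction counts as followed if any of the eight variants passes.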
    for index, instruction_id in enumerate(instruction_list):
        instruction_cls = instructions_registry.INSTRUCTION_DICT[instruction_id]
        instruction = instruction_cls(instruction_id)

        # Remove None values from kwargs to avoid unexpected keyword
        # argument errors in build_description method.
        kwargs = {k: v for k, v in inp.kwargs[index].items() if v}
        instruction.build_description(**kwargs)
        args = instruction.get_instruction_args()
        if args and "prompt" in args:
            instruction.build_description(prompt=inp.prompt)

        is_following = False
        for r in all_responses:
            if r.strip() and instruction.check_following(r):
                is_following = True
                break

        is_following_list.append(is_following)

    return OutputExample(
        instruction_id_list=inp.instruction_id_list,
        prompt=inp.prompt,
        response=response,
        follow_all_instructions=all(is_following_list),
        follow_instruction_list=is_following_list,
    )


def process_results(doc, results):
    eval_logger.warning(
        "This task is meant for chat-finetuned models, and may not give "
        "meaningful results for models other than `openai` or `anthropic` "
        "if `doc_to_text` in its YAML is not wrapped in the appropriate "
        "chat template string. This warning will be removed when chat "
        "templating support is added natively to local models"
    )
    inp = InputExample(
        key=doc["key"],
        instruction_id_list=doc["instruction_id_list"],
        prompt=doc["prompt"],
        kwargs=doc["kwargs"],
    )
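    # `results` holds the model generations for this doc; only the first
    # is scored.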
    response = results[0]

    out_strict = test_instruction_following_strict(inp, response)
    out_loose = test_instruction_following_loose(inp, response)
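    # prompt_level_*: True only if every instruction for this prompt was
    # followed; inst_level_*: per-instruction booleans, aggregated across
    # prompts by agg_inst_level_acc below.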
    return {
        "prompt_level_strict_acc": out_strict.follow_all_instructions,
        "inst_level_strict_acc": out_strict.follow_instruction_list,
        "prompt_level_loose_acc": out_loose.follow_all_instructions,
        "inst_level_loose_acc": out_loose.follow_instruction_list,
    }


def agg_inst_level_acc(items):
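    # Micro-average: flatten the per-prompt boolean lists and take the mean
    # over all instructions across all prompts.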
    flat_items = [item for sublist in items for item in sublist]
    inst_level_acc = sum(flat_items) / len(flat_items)
    return inst_level_acc
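

# Hypothetical example (illustrative only; the doc fields mirror the IFEval
# dataset schema consumed above, and the instruction id is one registered in
# instructions_registry.INSTRUCTION_DICT):
#
#   doc = {
#       "key": 1000,
#       "prompt": "Write a haiku about autumn, in all lowercase.",
#       "instruction_id_list": ["change_case:english_lowercase"],
#       "kwargs": [{}],
#   }
#   metrics = process_results(doc, ["crisp leaves drift down\n..."])
#   # e.g. {"prompt_level_strict_acc": True,
#   #       "inst_level_strict_acc": [True], ...}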