# lm-evaluation/build/lib/lm_eval/tasks/kobest/utils.py
"""Prompt-formatting and metric helpers for the KoBEST benchmark tasks."""

from datasets import Dataset
from sklearn.metrics import f1_score

def copa_doc_to_text(doc: dict) -> str:
    # Map the question type onto the Korean connective that joins the premise
    # to each alternative: "원인" (cause) -> " 왜냐하면" ("because"),
    # "결과" (effect) -> " 그래서" ("so").
    connector = {"원인": " 왜냐하면", "결과": " 그래서"}[doc["question"].strip()]
    return f"""{doc["premise"]} {connector}"""


def copa_doc_to_target(doc: dict) -> str:
    # Label 0 selects the first alternative, label 1 the second.
    correct_choice = doc["alternative_1"] if doc["label"] == 0 else doc["alternative_2"]
    return f"""{correct_choice}"""


def copa_doc_to_choice(doc: dict) -> list:
    return [f"""{doc["alternative_1"]}""", f"""{doc["alternative_2"]}"""]
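
# Usage sketch (illustrative only): the record below is hypothetical and just
# mirrors the field names the COPA helpers expect.
#
#   doc = {"premise": "비가 왔다.", "question": "결과",
#          "alternative_1": "땅이 젖었다.", "alternative_2": "땅이 말랐다.",
#          "label": 0}
#   copa_doc_to_text(doc)    # -> "비가 왔다.  그래서"
#   copa_doc_to_target(doc)  # -> "땅이 젖었다."
#   copa_doc_to_choice(doc)  # -> ["땅이 젖었다.", "땅이 말랐다."]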

def sentineg_doc_to_text(doc: dict) -> str:
    # Prompt reads "문장: <sentence> 긍부정:" ("Sentence: ... Positive/negative:").
    return f"""문장: {doc["sentence"]} 긍부정:"""

def wic_doc_to_text(doc: dict) -> str:
    # Prompt reads "문장1: ... 문장2: ... 두 문장에서 <word>가 같은 뜻으로 쓰였나?"
    # ("Sentence 1: ... Sentence 2: ... Is <word> used with the same meaning
    # in both sentences?").
    return f"""문장1: {doc["context_1"]} 문장2: {doc["context_2"]} 두 문장에서 {doc["word"]}가 같은 뜻으로 쓰였나?"""

def hellaswag_process_doc(doc: Dataset) -> Dataset:
    def preprocessor(dataset):
        return {
            # "문장:" = "Sentence:".
            "query": f"""문장: {dataset["context"]}""",
            "choices": [
                dataset["ending_1"],
                dataset["ending_2"],
                dataset["ending_3"],
                dataset["ending_4"],
            ],
            "gold": int(dataset["label"]),
        }

    return doc.map(preprocessor)

def macro_f1_score(items):
    # items is a sequence of (gold, pred) pairs accumulated by the harness;
    # unzip them into parallel tuples and average F1 over classes.
    unzipped_list = list(zip(*items))
    golds = unzipped_list[0]
    preds = unzipped_list[1]
    fscore = f1_score(golds, preds, average="macro")
    return fscore
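
if __name__ == "__main__":
    # Minimal smoke test; the sample records are hypothetical and only mirror
    # the field names the helpers above expect.
    print(sentineg_doc_to_text({"sentence": "서비스가 좋았다."}))
    print(
        wic_doc_to_text(
            {"context_1": "손을 씻었다.", "context_2": "그는 손이 크다.", "word": "손"}
        )
    )

    # hellaswag_process_doc expects a datasets.Dataset, so build a tiny one.
    ds = Dataset.from_list(
        [
            {
                "context": "그는 문을 열었다.",
                "ending_1": "a",
                "ending_2": "b",
                "ending_3": "c",
                "ending_4": "d",
                "label": "2",
            }
        ]
    )
    print(hellaswag_process_doc(ds)[0]["gold"])  # 2

    # macro_f1_score consumes (gold, pred) pairs.
    print(macro_f1_score([(0, 0), (1, 1), (1, 0), (0, 0)]))  # ~0.7333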