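"""Utilities for the JGraphQA task (Japanese chart/graph question answering) in lmms-eval.

Answer parsing and scoring are adapted from the MMMU evaluation utilities:
https://github.com/MMMU-Benchmark/MMMU/blob/51ce7f3e829c16bb44bc5445782686b4c3508794/eval/eval_utils.py
"""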
import io
import re
from collections import defaultdict

from PIL import Image


def jgraphqa_doc_to_visual(doc):
    """Decode the stored image bytes and return a single RGB PIL image."""
    img_data = doc["image"]["bytes"]
    img = Image.open(io.BytesIO(img_data))
    return [img.convert("RGB")]


def jgraphqa_doc_to_text(doc, lmms_eval_specific_kwargs=None):
    """Build the prompt by wrapping the question with the configured pre/post prompts."""
    lmms_eval_specific_kwargs = lmms_eval_specific_kwargs or {}
    question = doc["question"]
    pre_prompt = lmms_eval_specific_kwargs.get("pre_prompt", "")
    post_prompt = lmms_eval_specific_kwargs.get("post_prompt", "")
    return f"{pre_prompt}{question}{post_prompt}"


def jgraphqa_process_results(doc, results):
    """Parse the model output and package it for aggregation and submission."""
    pred = results[0]
    parsed_pred = parse_open_response(pred)
    doc_id = doc["id"]
    jgraphqa_acc = {"id": doc_id, "subdomain": doc["type"], "answer": doc["answer"], "parsed_pred": parsed_pred}
    return {
        "jgraphqa_acc": jgraphqa_acc,
        "submission": {
            doc_id: pred,
        },
    }


def jgraphqa_aggregate_results(results):
    """Aggregate per-sample results into per-subdomain and overall accuracies."""
    evaluation_result = {}
    subset_to_eval_samples = defaultdict(list)
    for result in results:
        subset_to_eval_samples[result["subdomain"]].append(result)

    # Score each subdomain separately.
    for subset, sub_eval_samples in subset_to_eval_samples.items():
        judge_dict, metric_dict = evaluate_jgraphqa(sub_eval_samples)
        metric_dict.update({"num_example": len(sub_eval_samples)})
        evaluation_result[subset] = metric_dict

    printable_results = {}
    for domain, in_domain_cats in DOMAIN_CAT2SUB_CAT.items():
        in_domain_cat_results = {}
        for cat_name in in_domain_cats:
            if cat_name in evaluation_result:
                in_domain_cat_results[cat_name] = evaluation_result[cat_name]
        in_domain_ins_acc = calculate_ins_level_acc(in_domain_cat_results)
        in_domain_data_num = sum(cat_results["num_example"] for cat_results in in_domain_cat_results.values())
        printable_results["Overall-" + domain] = {
            "num": int(in_domain_data_num),
            "acc": round(in_domain_ins_acc, 5),
        }

        for cat_name, cat_results in in_domain_cat_results.items():
            printable_results[cat_name] = {
                "num": int(cat_results["num_example"]),
                "acc": round(cat_results["acc"], 5),
            }

    all_ins_acc = calculate_ins_level_acc(evaluation_result)
    printable_results["Overall"] = {
        "num": sum(cat_results["num_example"] for cat_results in evaluation_result.values()),
        "acc": round(all_ins_acc, 5),
    }
    return printable_results["Overall"]["acc"]


def calculate_ins_level_acc(results):
    """Calculate the instance-level accuracy over the given subdomain results.

    https://github.com/MMMU-Benchmark/MMMU/blob/51ce7f3e829c16bb44bc5445782686b4c3508794/eval/eval_utils.py#L246
    """
    acc = 0
    ins_num = 0
    for cat_results in results.values():
        acc += cat_results["acc"] * cat_results["num_example"]
        ins_num += cat_results["num_example"]
    if ins_num == 0:
        return 0
    return acc / ins_num


# Mapping from domain to the chart/graph sub-categories used in the dataset.
DOMAIN_CAT2SUB_CAT = {
    "GENERAL": [
        "line",
        "table",
        "bar",
        "circle",
    ],
}


def eval_open(gold_i, pred_i):
    """
    Evaluate an open question instance.
    https://github.com/MMMU-Benchmark/MMMU/blob/51ce7f3e829c16bb44bc5445782686b4c3508794/eval/eval_utils.py#L191
    """
    correct = False
    # Normalize the gold answer(s) into a flat list of candidate strings.
    if isinstance(gold_i, list):
        norm_answers = []
        for answer in gold_i:
            norm_answers.extend(normalize_str(answer))
    else:
        norm_answers = normalize_str(gold_i)

    for pred in pred_i:
        if isinstance(pred, str):
            # String prediction: check whether any normalized gold answer is a substring.
            for norm_ans in norm_answers:
                if isinstance(norm_ans, str) and norm_ans in pred:
                    correct = True
                    break
        else:
            # Non-string prediction: check for exact membership.
            if pred in norm_answers:
                correct = True
                break
    return correct


def evaluate_jgraphqa(samples):
    """
    Batch evaluation of open-ended questions.
    https://github.com/MMMU-Benchmark/MMMU/blob/51ce7f3e829c16bb44bc5445782686b4c3508794/eval/eval_utils.py#L219
    """
    pred_correct = 0
    judge_dict = dict()
    if len(samples) == 0:
        # Return both values so callers can always unpack (judge_dict, metric_dict).
        return judge_dict, {"acc": 0}

    for sample in samples:
        gold_i = sample["answer"]
        pred_i = sample["parsed_pred"]
        correct = eval_open(gold_i, pred_i)

        if correct:
            judge_dict[sample["id"]] = "Correct"
            pred_correct += 1
        else:
            judge_dict[sample["id"]] = "Wrong"

    return judge_dict, {"acc": pred_correct / len(samples)}


def extract_numbers(string):
    """
    Extract all forms of numbers from a string with regex.
    """
    # Numbers with thousands separators, e.g. "1,000".
    pattern_commas = r"-?\b\d{1,3}(?:,\d{3})+\b"
    # Scientific notation, e.g. "1.5e3".
    pattern_scientific = r"-?\d+(?:\.\d+)?[eE][+-]?\d+"
    # Plain integers and decimals not already covered by the two patterns above.
    pattern_simple = r"-?(?:\d+\.\d+|\.\d+|\d+)(?![eE][+-]?\d+)(?![,\d])"
    # Digits immediately followed by a common Japanese counter or unit.
    pattern_japanese = r"(\d+)(?:つ|個|度|円|人|年|匹|台|%)"

    numbers_with_commas = re.findall(pattern_commas, string)
    numbers_scientific = re.findall(pattern_scientific, string)
    numbers_simple = re.findall(pattern_simple, string)
    numbers_japanese = re.findall(pattern_japanese, string)

    all_numbers = numbers_with_commas + numbers_scientific + numbers_simple + numbers_japanese
    return all_numbers


def normalize_str(string):
    """
    Normalize a string: strip whitespace, drop commas and spaces, and lowercase.
    Adapted from https://github.com/MMMU-Benchmark/MMMU/blob/51ce7f3e829c16bb44bc5445782686b4c3508794/eval/eval_utils.py#L76
    """
    string = string.strip()
    string = string.replace(",", "")
    string = string.replace(" ", "")
    string = string.replace("　", "")  # full-width space
    string = string.lower()
    if len(string) == 1:
        # Pad single characters so substring matching does not fire on arbitrary letters.
        return [" " + string, string + " "]
    return [string]


def parse_open_response(response):
    """
    Parse the prediction from the generated response.
    Return a list of predicted strings or numbers.
    https://github.com/MMMU-Benchmark/MMMU/blob/51ce7f3e829c16bb44bc5445782686b4c3508794/eval/eval_utils.py#L122
    """

    def get_key_subresponses(response):
        response = response.strip().strip("。")
        sub_responses = re.split(r"[。!?.]\s*|\n", response)

        # Japanese phrases that typically introduce the final answer.
        indicators_of_keys = ["よって", "よって、", "答えは", "答えは、", "最終的に", "最終的に、", "解答は", "解答は、", "回答は", "回答は、"]
        key_responses = []
        for index, resp in enumerate(sub_responses):
            # In the last sub-response, an equals sign may also introduce the answer.
            if index == len(sub_responses) - 1:
                indicators_of_keys.extend(["=", "＝"])
            # Keep the shortest segment that follows any indicator.
            shortest_key_response = None
            for indicator in indicators_of_keys:
                if indicator in resp:
                    if not shortest_key_response:
                        shortest_key_response = resp.split(indicator)[-1].strip()
                    else:
                        if len(resp.split(indicator)[-1].strip()) < len(shortest_key_response):
                            shortest_key_response = resp.split(indicator)[-1].strip()

            if shortest_key_response:
                # Discard candidates that are only punctuation.
                if shortest_key_response.strip() not in [",", ".", "!", "?", ";", ":", "'", "、", "。", "！", "？", "；", "："]:
                    key_responses.append(shortest_key_response)
        if len(key_responses) == 0:
            return [response]
        return key_responses

    key_responses = get_key_subresponses(response)

    # Add any numbers found in the key sub-responses as extra candidates.
    pred_list = key_responses.copy()
    for resp in key_responses:
        pred_list.extend(extract_numbers(resp))

    # Normalize every candidate and deduplicate.
    tmp_pred_list = []
    for i in range(len(pred_list)):
        tmp_pred_list.extend(normalize_str(pred_list[i]))
    pred_list = tmp_pred_list

    pred_list = list(set(pred_list))

    return pred_list
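

if __name__ == "__main__":
    # Minimal sanity-check sketch of the parsing helpers. This block is illustrative
    # only and is not used by lmms-eval; the sample response below is made up.
    sample_response = "グラフを読むと、答えは1,200円です。"
    parsed = parse_open_response(sample_response)
    print(parsed)  # normalized candidates, e.g. "1200"
    print(eval_open("1200", parsed))  # True if the gold answer is recovered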