# JGraphQA/utils.py
# Adapted from lmms-eval (https://github.com/EvolvingLMMs-Lab/lmms-eval). Below is the original copyright:
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file was originally obtained from:
# https://github.com/EvolvingLMMs-Lab/lmms-eval/blob/main/lmms_eval/tasks/jmmmu/utils.py
#
# Minor modification by Akira Kinoshita on 2025-07-24.
import io
import re
from collections import defaultdict

from PIL import Image

def jgraphqa_doc_to_visual(doc):
    """Decode the embedded image bytes into a single RGB PIL image."""
    img_data = doc["image"]["bytes"]
    img = Image.open(io.BytesIO(img_data))
    return [img.convert("RGB")]
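
# Illustrative usage (a sketch; `png_encoded_bytes` is a placeholder for the
# raw encoded image bytes the dataset stores under doc["image"]["bytes"]):
#   >>> doc = {"image": {"bytes": png_encoded_bytes}}
#   >>> visuals = jgraphqa_doc_to_visual(doc)
#   >>> (len(visuals), visuals[0].mode)
#   (1, 'RGB')
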
def jgraphqa_doc_to_text(doc, lmms_eval_specific_kwargs):
    """Wrap the question with the task-specific pre- and post-prompts."""
    question = doc["question"]
    pre_prompt = lmms_eval_specific_kwargs["pre_prompt"]
    post_prompt = lmms_eval_specific_kwargs["post_prompt"]
    return f"{pre_prompt}{question}{post_prompt}"
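
# Illustrative call (hypothetical prompt strings; the real pre/post prompts
# come from the lmms-eval task config):
#   >>> jgraphqa_doc_to_text(
#   ...     {"question": "このグラフの最大値はいくつですか?"},
#   ...     {"pre_prompt": "", "post_prompt": "\n簡潔に答えてください。"},
#   ... )
#   'このグラフの最大値はいくつですか?\n簡潔に答えてください。'
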
def jgraphqa_process_results(doc, results):
    """Parse one model response and package it for aggregation and submission."""
    pred = results[0]
    parsed_pred = parse_open_response(pred)
    doc_id = doc["id"]  # avoid shadowing the builtin id()
    jgraphqa_acc = {"id": doc_id, "subdomain": doc["type"], "answer": doc["answer"], "parsed_pred": parsed_pred}
    return {
        "jgraphqa_acc": jgraphqa_acc,
        "submission": {
            doc_id: pred,
        },
    }
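
# Sketch of the per-document flow (hypothetical field values): the response is
# parsed once, then recorded both for accuracy scoring ("jgraphqa_acc") and for
# the raw submission file ("submission").
#   >>> doc = {"id": "q1", "type": "bar", "answer": "42"}
#   >>> out = jgraphqa_process_results(doc, ["計算すると、答えは42です。"])
#   >>> sorted(out["jgraphqa_acc"]["parsed_pred"])
#   ['42', '42です']
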
def jgraphqa_aggregate_results(results):
    """Aggregate per-document results into per-category and overall accuracies."""
evaluation_result = {}
subset_to_eval_samples = defaultdict(list)
for result in results:
subset_to_eval_samples[result["subdomain"]].append(result)
for subset, sub_eval_samples in subset_to_eval_samples.items():
judge_dict, metric_dict = evaluate_jgraphqa(sub_eval_samples)
metric_dict.update({"num_example": len(sub_eval_samples)})
evaluation_result[subset] = metric_dict
printable_results = {}
for domain, in_domain_cats in DOMAIN_CAT2SUB_CAT.items():
in_domain_cat_results = {}
        for cat_name in in_domain_cats:
            if cat_name in evaluation_result:
                in_domain_cat_results[cat_name] = evaluation_result[cat_name]
in_domain_ins_acc = calculate_ins_level_acc(in_domain_cat_results)
in_domain_data_num = sum([cat_results["num_example"] for cat_results in in_domain_cat_results.values()])
printable_results["Overall-" + domain] = {
"num": int(in_domain_data_num),
"acc": round(in_domain_ins_acc, 5),
}
# add sub category
for cat_name, cat_results in in_domain_cat_results.items():
printable_results[cat_name] = {
"num": int(cat_results["num_example"]),
"acc": round(cat_results["acc"], 5),
}
all_ins_acc = calculate_ins_level_acc(evaluation_result)
printable_results["Overall"] = {
"num": sum([cat_results["num_example"] for cat_results in evaluation_result.values()]),
"acc": round(all_ins_acc, 5),
}
return printable_results["Overall"]["acc"]
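
# Worked sketch of the aggregation (hypothetical results): per-subdomain
# accuracies are computed first, then combined into an example-weighted
# overall score, which is the single float reported for the task.
#   >>> jgraphqa_aggregate_results([
#   ...     {"id": "q1", "subdomain": "bar", "answer": "12", "parsed_pred": ["12"]},
#   ...     {"id": "q2", "subdomain": "line", "answer": "34", "parsed_pred": ["56"]},
#   ... ])
#   0.5
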
##################
# Helper functions adapted from the official MMMU repo.
##################
def calculate_ins_level_acc(results):
    """Calculate the micro-averaged (per-example weighted) accuracy over the given subject results.
    https://github.com/MMMU-Benchmark/MMMU/blob/51ce7f3e829c16bb44bc5445782686b4c3508794/eval/eval_utils.py#L246
    """
acc = 0
ins_num = 0
for cat_results in results.values():
acc += cat_results["acc"] * cat_results["num_example"]
ins_num += cat_results["num_example"]
if ins_num == 0:
return 0
return acc / ins_num
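
# Worked example (hypothetical numbers): accuracies are weighted by example
# counts, so 10 examples at 0.5 and 30 examples at 0.8 give
# (0.5 * 10 + 0.8 * 30) / 40 = 0.725, not the unweighted mean 0.65.
#   >>> calculate_ins_level_acc({
#   ...     "line": {"acc": 0.5, "num_example": 10},
#   ...     "bar": {"acc": 0.8, "num_example": 30},
#   ... })
#   0.725
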
DOMAIN_CAT2SUB_CAT = {
"GENERAL": [
"line",
"table",
"bar",
"circle",
],
}
def eval_open(gold_i, pred_i):
"""
Evaluate an open question instance
https://github.com/MMMU-Benchmark/MMMU/blob/51ce7f3e829c16bb44bc5445782686b4c3508794/eval/eval_utils.py#L191
"""
correct = False
    if isinstance(gold_i, list):
        # normalize every gold alternative before matching
        norm_answers = []
for answer in gold_i:
norm_answers.extend(normalize_str(answer))
else:
norm_answers = normalize_str(gold_i)
    for pred in pred_i:  # predictions were already normalized in the parse phase
        if isinstance(pred, str):  # string prediction: substring containment
            for norm_ans in norm_answers:
                # only check whether the normalized gold answer appears in the prediction
                if isinstance(norm_ans, str) and norm_ans in pred:
                    correct = True
                    break
        else:  # numeric prediction: exact membership test
            if pred in norm_answers:
                correct = True
                break
    return correct
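
# Illustrative checks (hypothetical values): string predictions match by
# substring containment against each normalized gold answer, so surrounding
# text around the answer is tolerated.
#   >>> eval_open("42", ["答えは42です", "42"])
#   True
#   >>> eval_open("42", ["答えは43です"])
#   False
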
def evaluate_jgraphqa(samples):
"""
Batch evaluation for multiple choice and open questions.
https://github.com/MMMU-Benchmark/MMMU/blob/51ce7f3e829c16bb44bc5445782686b4c3508794/eval/eval_utils.py#L219
"""
pred_correct = 0
judge_dict = dict()
for sample in samples:
gold_i = sample["answer"]
pred_i = sample["parsed_pred"]
correct = eval_open(gold_i, pred_i)
if correct:
judge_dict[sample["id"]] = "Correct"
pred_correct += 1
else:
judge_dict[sample["id"]] = "Wrong"
    if len(samples) == 0:
        # keep the (judge_dict, metric_dict) return shape for empty batches
        return judge_dict, {"acc": 0}
    return judge_dict, {"acc": pred_correct / len(samples)}
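
# Illustrative run (hypothetical samples): returns the per-id verdicts plus
# the plain (unweighted) accuracy over the batch.
#   >>> evaluate_jgraphqa([
#   ...     {"id": "q1", "answer": "12", "parsed_pred": ["12"]},
#   ...     {"id": "q2", "answer": "34", "parsed_pred": ["56"]},
#   ... ])
#   ({'q1': 'Correct', 'q2': 'Wrong'}, {'acc': 0.5})
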
def extract_numbers(string):
    """
    Extract all forms of numbers from a string with regex.
    """
# Pattern for numbers with commas
pattern_commas = r"-?\b\d{1,3}(?:,\d{3})+\b"
# Pattern for scientific notation
pattern_scientific = r"-?\d+(?:\.\d+)?[eE][+-]?\d+"
# pattern_simple = r'-?\d+(?:\.\d+)?(?![eE][+-]?\d+)'
pattern_simple = r"-?(?:\d+\.\d+|\.\d+|\d+)(?![eE][+-]?\d+)(?![,\d])"
# Pattern for Japanese numbers
pattern_japanese = r"(\d+)(?:つ|個|度|円|人|年|匹|台|%)"
# Extract numbers with commas
numbers_with_commas = re.findall(pattern_commas, string)
# Extract numbers in scientific notation
numbers_scientific = re.findall(pattern_scientific, string)
# Extract simple numbers without commas
numbers_simple = re.findall(pattern_simple, string)
# Extract Japanese numbers
numbers_japanese = re.findall(pattern_japanese, string)
# Combine all extracted numbers
all_numbers = numbers_with_commas + numbers_scientific + numbers_simple + numbers_japanese
return all_numbers
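
# Illustrative behaviour: the patterns can overlap, so the same value may be
# extracted more than once (here the simple pattern and the Japanese-counter
# pattern both match the "5" in "5個"); parse_open_response() deduplicates
# downstream.
#   >>> extract_numbers("りんごは5個あります")
#   ['5', '5']
#   >>> extract_numbers("答えは 12,345 円です")
#   ['12,345', '345']
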
def normalize_str(string):
    """
    Normalize a string for matching: strip, drop commas and spaces, and lower-case.
    https://github.com/MMMU-Benchmark/MMMU/blob/51ce7f3e829c16bb44bc5445782686b4c3508794/eval/eval_utils.py#L76
    """
    string = string.strip()
    string = string.replace(",", "")
    string = string.replace(" ", "")  # half-width space
    string = string.replace("　", "")  # full-width space
    string = string.lower()
    if len(string) == 1:
        # pad single characters to avoid trivial substring matches
        return [" " + string, string + " "]
    return [string]
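
# Illustrative behaviour: multi-character strings come back as one normalized
# candidate; single characters are padded on either side so that a bare "a"
# does not trivially match every prediction containing that letter.
#   >>> normalize_str("1,234 円")
#   ['1234円']
#   >>> normalize_str("A")
#   [' a', 'a ']
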
def parse_open_response(response):
"""
Parse the prediction from the generated response.
Return a list of predicted strings or numbers.
https://github.com/MMMU-Benchmark/MMMU/blob/51ce7f3e829c16bb44bc5445782686b4c3508794/eval/eval_utils.py#L122
"""
    def get_key_subresponses(response):
        response = response.strip().strip("。")
        sub_responses = re.split(r"[。!?.]\s*|\n", response)
        # Japanese phrases that typically introduce the final answer
        indicators_of_keys = ["よって", "よって、", "答えは", "答えは、", "最終的に", "最終的に、", "解答は", "解答は、", "回答は", "回答は、"]
        key_responses = []
        for index, resp in enumerate(sub_responses):
            # for the last sub-response, also treat an equals sign as an answer
            # indicator (the entire response may be a single equation)
            if index == len(sub_responses) - 1:
                indicators_of_keys.extend(["=", "＝"])  # half- and full-width equals
            shortest_key_response = None  # the shortest tail that may contain the answer
            for indicator in indicators_of_keys:
                if indicator in resp:
                    tail = resp.split(indicator)[-1].strip()
                    if not shortest_key_response or len(tail) < len(shortest_key_response):
                        shortest_key_response = tail
            if shortest_key_response:
                # keep it only if it is more than bare punctuation
                if shortest_key_response.strip() not in [",", ".", "!", "?", ";", ":", "'", "、", "。", "！", "？", "；", "："]:
                    key_responses.append(shortest_key_response)
        if len(key_responses) == 0:  # no indicator found; fall back to the whole response
            return [response]
        return key_responses
key_responses = get_key_subresponses(response)
pred_list = key_responses.copy() # keep the original string response
for resp in key_responses:
pred_list.extend(extract_numbers(resp))
tmp_pred_list = []
for i in range(len(pred_list)):
tmp_pred_list.extend(normalize_str(pred_list[i]))
pred_list = tmp_pred_list
# remove duplicates
pred_list = list(set(pred_list))
return pred_list
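
# End-to-end sketch on a hypothetical response: the indicator phrase "答えは"
# selects the tail "42です", numbers are extracted from that tail, and all
# candidates are normalized and deduplicated (set order is arbitrary, hence
# sorted() here).
#   >>> sorted(parse_open_response("計算すると、答えは42です。"))
#   ['42', '42です']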