diff --git a/.github/scripts/assert_score.py b/.github/scripts/assert_score.py new file mode 100644 index 0000000000000000000000000000000000000000..5d5fbfe5c529efd69a0fb589ab5f6b3d827923bb --- /dev/null +++ b/.github/scripts/assert_score.py @@ -0,0 +1,61 @@ +import argparse +import ast +import json +import os + +import pandas as pd + + +def validate_scores(dataset_list, assert_score, model_name): + for dataset in dataset_list: + base_score = assert_score[dataset][model_name] + if dataset == "OCRBench_MINI": + score_file = os.path.join("outputs", f"{model_name}/{model_name}_{dataset}_score.json") + cur_score = 0 + with open(score_file, "r") as f: + total_score = json.load(f) + cur_score = total_score["Final Score Norm"] + assert ( + abs(cur_score - float(base_score)) <= 0.01 + ), f"{dataset} on {model_name}: cur_score is {cur_score}, base_score is {base_score}" + else: + score_file = os.path.join("outputs", f"{model_name}/{model_name}_{dataset}_acc.csv") + df = pd.read_csv(score_file) + cur_score = df["Overall"].iloc[0] + if dataset == "MMBench_V11_MINI": + cur_score = df.loc[df["split"] == "dev", "Overall"].values + assert ( + abs(cur_score - float(base_score)) <= 0.01 + ), f"{dataset} on {model_name}: cur_score is {cur_score}, base_score is {base_score}" + print(f"cur_score is {cur_score}, base_score is {base_score}") + + +def parse_arguments(): + parser = argparse.ArgumentParser(description="Validate model scores against csv/json data") + + parser.add_argument("--dataset", type=str, required=True, help="Space-separated list of datasets") + + parser.add_argument( + "--base_score", type=str, required=True, help="Dictionary string in format {dataset:{model:score}}" + ) + + parser.add_argument("--model-name", type=str, required=True, help="Name of the model to validate") + + return parser.parse_args() + + +def main(): + args = parse_arguments() + + try: + dataset_list = args.dataset.split() + base_score = ast.literal_eval(args.base_score) + except Exception as e: + print(f"Parameter parsing error: {str(e)}") + return + + validate_scores(dataset_list, base_score, args.model_name) + + +if __name__ == "__main__": + main() diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 0000000000000000000000000000000000000000..1eb46dcbb3fc7191259c017069e3206f3638398d --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,23 @@ +name: lint + +on: [push, pull_request] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.10 + uses: actions/setup-python@v2 + with: + python-version: 3.10.15 + - name: Install pre-commit hook + run: | + pip install pre-commit + pre-commit install + - name: Linting + run: pre-commit run --all-files diff --git a/.github/workflows/pr-run-test.yml b/.github/workflows/pr-run-test.yml new file mode 100644 index 0000000000000000000000000000000000000000..4d29116146b9be5204e90212ed57bddefdccce90 --- /dev/null +++ b/.github/workflows/pr-run-test.yml @@ -0,0 +1,47 @@ +name: pr_run_test + +on: + pull_request: + branches: + - "main" + paths-ignore: + - "docs/**" + - "**.md" + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + BASE_SCORE: 
'{"MMBench_V11_MINI":{"Qwen2-VL-7B-Instruct":0.8727272727272727,"InternVL2_5-8B":0.8727272727272727,"llava_onevision_qwen2_7b_si":0.8363636363636363},"MMStar_MINI":{"Qwen2-VL-7B-Instruct":0.6266666666666667,"InternVL2_5-8B":0.6333333333333333,"llava_onevision_qwen2_7b_si":0.49333333333333335},"AI2D_MINI":{"Qwen2-VL-7B-Instruct":0.7854251012145749,"InternVL2_5-8B":0.8421052631578947,"llava_onevision_qwen2_7b_si":0.8178137651821862},"OCRBench_MINI":{"Qwen2-VL-7B-Instruct":16.6,"InternVL2_5-8B":16.4,"llava_onevision_qwen2_7b_si":12.9}}' + +jobs: + vlm_test: + if: ${{!cancelled()}} + runs-on: [linux-a100] + strategy: + fail-fast: false + matrix: + model: [Qwen/Qwen2-VL-7B-Instruct,OpenGVLab/InternVL2_5-8B,lmms-lab/llava-onevision-qwen2-7b-si] + dataset: ["MMBench_V11_MINI MMStar_MINI AI2D_MINI","OCRBench_MINI"] + container: + image: kkscilife/vlmevalkit_2:a100 + options: "--gpus=all --ipc=host -e https_proxy=$https_proxy -e http_proxy=$http_proxy --pull never" + volumes: + - /mnt/187:/mnt/187 + steps: + - name: clone_repo + uses: actions/checkout@v3 + - name: evaluation_model + run: | + pip install -e . + pre_model=$(echo ${{matrix.model}} | awk -F'/' '{print $1}') + ln -s /mnt/187/$pre_model . + if [ "${{matrix.model}}" = "lmms-lab/llava-onevision-qwen2-7b-si" ];then + model_name="llava_onevision_qwen2_7b_si" + else + model_name=$(echo ${{matrix.model}} | awk -F'/' '{print $2}') + fi + nvidia-smi + python run.py --data ${{matrix.dataset}} --model $model_name + python .github/scripts/assert_score.py --dataset "${{matrix.dataset}}" --base_score $BASE_SCORE --model-name $model_name diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..3ba87177a0a22267f3a81c272b997661b51e8c13 --- /dev/null +++ b/.gitignore @@ -0,0 +1,201 @@ +outputs/ +public_eval/ +*.xlsx +*.pkl +*.csv +.idea/ + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST +.vscode/ +.gradio/ + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. 
+#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# Images +images/ + +scripts/*ttf +.history +cache_dir/* + +# Evaluation Outputs +outputs/* +demo.ipynb +*json +.vscode +*.swp +GPT4o_MINI/ + +2weiyun* +script.py +Gemini* +Claude3-5V* +GLM4V* +GPT4o* +GPT4V* +mmmu_debug +bailingMM +BailingMM* +SenseChat* +Step* +DoubaoVL +arch +BlueLM* +mmb_* +Reka* +Taiyi +TeleMM +apple.jpg +assets/LOGO.png +api_list.txt +vlmeval/gemini_tmp.py \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..41f53645d30c369f2bfab0d32e5ff519b9186b06 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,31 @@ +exclude: | + (?x)^( + scripts/| + assets/| + vlmeval/config.py | + vlmeval/dataset/utils/wemath.py | + ) +repos: + - repo: https://github.com/PyCQA/flake8 + rev: 6.1.0 + hooks: + - id: flake8 + args: ["--max-line-length=120", "--ignore=F401,F403,F405,E402,E722,E741,W503,E231,E702"] + exclude: ^configs/ + - repo: https://github.com/pre-commit/mirrors-yapf + rev: v0.30.0 + hooks: + - id: yapf + args: ["--style={column_limit=120}"] + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.1.0 + hooks: + - id: trailing-whitespace + - id: check-yaml + - id: end-of-file-fixer + - id: requirements-txt-fixer + - id: check-merge-conflict + - id: fix-encoding-pragma + args: ["--remove"] + - id: mixed-line-ending + args: ["--fix=lf"] diff --git a/EMMA/README.md b/EMMA/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e36215d9f6e2a337ee9ad7e95ecf775f9d799921 --- /dev/null +++ b/EMMA/README.md @@ -0,0 +1,159 @@ +

+ +# EMMA: An Enhanced MultiModal ReAsoning Benchmark + +🌟 This is the official repository for the paper "[Can MLLMs Reason in Multimodality? EMMA: An Enhanced MultiModal ReAsoning Benchmark](https://www.arxiv.org/abs/2501.05444)", which contains generation and evaluation code for the **EMMA** benchmark. + +[[🌐 Homepage](https://emma-benchmark.github.io/)] [[🤗EMMA](https://huggingface.co/datasets/luckychao/EMMA)] [[🤗EMMA-mini](https://huggingface.co/datasets/luckychao/EMMA-mini)] [[📖 ArXiv Paper](https://www.arxiv.org/abs/2501.05444)] + +## 💥 News +- **[2025.1.23]** 🔍 We've updated the leaderboard with the results of the [QVQ-72B-Preview](https://huggingface.co/Qwen/QVQ-72B-Preview) model included. +- **[2025.1.10]** Our dataset is now accessible at [Huggingface Datasets](https://huggingface.co/datasets/luckychao/EMMA). +- **[2025.1.10]** Our paper is now accessible at https://arxiv.org/abs/2501.05444. + +## 👀 About EMMA + +The ability to organically reason **over** and **with** both text and images is a pillar of human intelligence, yet the ability of Multimodal Large Language Models (MLLMs) to perform such multimodal reasoning remains under-explored. +We introduce **EMMA (Enhanced MultiModal reAsoning)**, a benchmark targeting organic multimodal reasoning across mathematics, physics, chemistry, and coding. +EMMA tasks demand advanced cross-modal reasoning that cannot be solved by thinking separately in each modality, offering an enhanced test suite for MLLMs' reasoning capabilities. + +EMMA is composed of 2,788 problems, of which 1,796 are newly constructed, across four domains. Within each subject, we further provide fine-grained labels for each question based on the specific skills it measures. + +

+*Overview of EMMA.*
+ +Our evaluation of state-of-the-art MLLMs on EMMA reveals significant limitations in handling complex multimodal and multi-step reasoning tasks, with even advanced techniques like Chain-of-Thought prompting and test-time compute scaling underperforming. +These findings underscore the need for improved multimodal architectures and training paradigms to close the gap between human and model reasoning in multimodality. + +## 🏆 Leaderboard + +The leaderboard is available [here](https://emma-benchmark.github.io/#leaderboard). + +## 📖 Dataset Usage + +### Data Downloading + +To create a more balanced subset of EMMA, we randomly sample 400 questions (100 per subject) from the benchmark and get EMMA-mini[🤗](https://huggingface.co/datasets/luckychao/EMMA-mini). + +You can download both two datasets by the following command (Taking downloading math data as an example): + +```python +from datasets import load_dataset + +dataset = load_dataset("luckychao/EMMA", "Math", split="test") +``` + +```python +from datasets import load_dataset + +dataset = load_dataset("luckychao/EMMA-mini", "Math", split="test") +``` + +### Data Format + +The dataset is provided in jsonl format and contains the following attributes: + +``` +{ + "pid": [string] Problem ID, e.g., “math_1”, + "question": [string] The question text, + "options": [list] Choice options for multiple-choice problems. For free-form problems, this could be a 'none' value, + "answer": [string] The correct answer for the problem, + "image_1": [image] , + "image_2": [image] , + "image_3": [image] , + "image_4": [image] , + "image_5": [image] , + "solution": [string] The detailed thinking steps required to solve the problem, + "subject": [string] The subject of data, e.g., “Math”, “Physics”..., + "task": [string] The task of the problem, e.g., “Code Choose Vis”, + "category": [string] The category of the problem, e.g., “2D Transformation”, + "source": [string] The original source dataset of the data, e.g., “math-vista”. For handmade data, this could be “Newly annotated” , + "type": [string] Types of questions, e.g., “Multiple Choice”, “Open-ended”, + "context": [string] Background knowledge required for the question. For problems without context, this could be a 'none' value, +} +``` + +## 📈 Evaluation + +### Responses Generation +Our repository supports the evaluation of open source models such as Qwen2-VL, InternVL, LLaVA, and closed source models such as GPT, Gemini, Claude, etc. +You can generate responses of these models by using the following commands: + +Open-source Model: +``` + python generate_response.py \ + --split 'test' \ + --subject 'Math' 'Physics' 'Chemistry' 'Coding' \ + --strategy 'CoT' \ + --config_path 'configs/gpt.yaml' \ + --model_path 'path_to_your_local_model' \ + --output_path 'path_to_output_file' \ + --max_tokens 4096 \ + --temperature 0.7 \ + --save_every 20 +``` + +Close-source Model: + +``` + python generate_response.py \ + --dataset_name 'luckychao/EMMA' \ + --split 'test' \ + --subject 'Math' 'Physics' 'Chemistry' 'Coding' \ + --strategy 'CoT' \ + --config_path 'configs/gpt.yaml' \ + --model 'remote-model-name' \ + --api_key '' \ + --output_path 'path_to_output_file' \ + --max_tokens 4096 \ + --temperature 0 \ + --save_every 20 +``` + +### Answer Evaluation + +Once all the model outputs have been generated, execute the `evaluate.py` function to extract the short answer text from the detailed response and evaluate the correctness of the answers. +We offer two evaluation methods: **Fast-eval** and **LLMs-eval**. 
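+Both modes ultimately compare the short final answer buried in a model response against the ground truth. As a rough illustration of the rule-based idea, the minimal sketch below pulls the last `\boxed{}` value out of a response, assuming responses follow the `\boxed{}` convention requested by the prompt templates in `configs/gpt.yaml`; the repository's `fast_extract_answer` additionally handles bare option letters, plain numbers, and nested braces:
+
+```python
+import re
+
+def extract_boxed(response: str) -> str:
+    # Take the content of the last \boxed{...}; fall back to the raw text.
+    matches = re.findall(r'\\boxed\{([^{}]*)\}', response)
+    return matches[-1].strip() if matches else response.strip()
+
+print(extract_boxed(r"So the correct option is \(\boxed{C}\)."))  # -> C
+```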
The fast-eval method employs rule-based extraction for quicker processing, while the LLMs-eval method leverages advanced models like GPT-4o to enhance precision in extraction and evaluation. + +Fast-extract: +``` +python evaluate.py \ +--results_dir 'path_to_your_results_dir' \ +--response_label 'response' \ +--save_every 20 +``` + +LLMs-eval: +``` +python evaluate.py \ +--results_dir 'path_to_your_results_dir' \ +--response_label 'response' \ +--save_every 20 \ +--gpt_eval \ +--api_key '' \ +--model 'chatgpt-4o-latest' +``` + +### Score Calculation + +Finally, execute `python evaluation/calculate_acc.py` to calculate the final score based on the evaluation results. +This step will compute overall accuracy as well as accuracy for each subject, category, and tasks. + + +## 📝Citation + +If you find our benchmark useful in your research, please consider citing this BibTex: + +``` +@article{hao2025can, + title={Can MLLMs Reason in Multimodality? EMMA: An Enhanced MultiModal ReAsoning Benchmark}, + author={Hao, Yunzhuo and Gu, Jiawei and Wang, Huichen Will and Li, Linjie and Yang, Zhengyuan and Wang, Lijuan and Cheng, Yu}, + journal={arXiv preprint arXiv:2501.05444}, + year={2025} +} +``` diff --git a/EMMA/__init__.py b/EMMA/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/EMMA/assets/EMMA.jpg b/EMMA/assets/EMMA.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dedf4a3a13c11cfb118506e486a5dc20e1126ba6 --- /dev/null +++ b/EMMA/assets/EMMA.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dca63355e2bcd79edcfd58fb923aff6884ec1684777773532de8f9d6b048602 +size 15371801 diff --git a/EMMA/assets/emma-small.jpg b/EMMA/assets/emma-small.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ca8b1ddaa98d338f824d319b658cb72aac37e480 --- /dev/null +++ b/EMMA/assets/emma-small.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1fa8142e684874802c36c9d0a1b8ed032c52c0ee246a6aeb182d94359e7de3d +size 387368 diff --git a/EMMA/configs/gpt.yaml b/EMMA/configs/gpt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5e292fd7e4c29cbf90f40b4db04df0db31b88472 --- /dev/null +++ b/EMMA/configs/gpt.yaml @@ -0,0 +1,20 @@ +Strategy_Instruction: + CoT: "Please solve the problem step by step." + Directly: "Please ensure that your output only contains the final answer without any additional content (such as intermediate reasoning steps)." + TrainCoT: "Output the thinking process in and final answer in tags." + +multi_choice_format: + "{context} + +{question} + +{options} + +Answer with the option's letter from the given choices and put the letter in one \"\\boxed{{}}\". " + +open_ended_format: + "{context} + + {question} + +Answer the question using a single word or phrase and put the answer in one \"\\boxed{{}}\". 
" diff --git a/EMMA/data_utils.py b/EMMA/data_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..767de7e1ffc0b3609aecc37579a8e940ad0b6c2c --- /dev/null +++ b/EMMA/data_utils.py @@ -0,0 +1,60 @@ +import yaml +import json + + +def load_yaml(file_path): + with open(file_path, 'r') as stream: + try: + yaml_dict = yaml.safe_load(stream) + return yaml_dict + except yaml.YAMLError as exc: + print(exc) + return None + + +def verify_response(response): + if isinstance(response, str): + response = response.strip() + if response == "" or response is None: + return False + if "Response Error" in response: + return False + return True + + +def build_query(sample, config, strategy): + """Build the text query by combining the context, question and options. The token is still there""" + context = sample['context'] + question = sample['question'] + example = "" + res_dict = {} + if sample['type'].lower() == 'multiple choice': + options = sample['options'] + start_chr = 'A' + for option in options: + example += f"{start_chr}: {option}\n" + start_chr = chr(ord(start_chr) + 1) + empty_prompt_sample_structure = config['multi_choice_format'] + empty_prompt = empty_prompt_sample_structure.format(context=context, question=question, options=example) + if strategy == 'CoT': + res_dict['query'] = empty_prompt + config['Strategy_Instruction']['CoT'] + elif strategy == 'TrainCoT': + res_dict['query'] = "Question: " + empty_prompt + config['Strategy_Instruction']['TrainCoT'] + else: + res_dict['query'] = empty_prompt + config['Strategy_Instruction']['Directly'] + + res_dict['gt_content'] = options[ord(sample['answer'].upper()) - ord('A')] + else: + empty_prompt_sample_structure = config['open_ended_format'] + empty_prompt = empty_prompt_sample_structure.format(context=context, question=question) + if strategy == 'CoT': + res_dict['query'] = empty_prompt + config['Strategy_Instruction']['CoT'] + elif strategy == 'TrainCoT': + res_dict['query'] = "Question: " + empty_prompt + config['Strategy_Instruction']['TrainCoT'] + else: + res_dict['query'] = empty_prompt + config['Strategy_Instruction']['Directly'] + res_dict['gt_content'] = sample['answer'] + + # append existing key and value in data + res_dict.update(sample) + return res_dict diff --git a/EMMA/do_full_eval.py b/EMMA/do_full_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/EMMA/evaluation/calculate_acc.py b/EMMA/evaluation/calculate_acc.py new file mode 100644 index 0000000000000000000000000000000000000000..f36c1ed13e1c2821dd27c88e866fb198af6eaa80 --- /dev/null +++ b/EMMA/evaluation/calculate_acc.py @@ -0,0 +1,130 @@ +import argparse +import logging +import os +import json +from collections import defaultdict + + +def gen_score(input_file, output_file, logger=logging.getLogger(__name__)): + with open(input_file, "r") as f: + data = json.load(f) + + total_correct = 0 + total_count = 0 + + subject_stats = defaultdict(lambda: {"correct": 0, "total": 0}) + type_stats = defaultdict(lambda: {"correct": 0, "total": 0}) + category_stats = defaultdict(lambda: defaultdict(lambda: {"correct": 0, "total": 0})) + task_stats = defaultdict(lambda: {"correct": 0, "total": 0}) + + for key, entry in data.items(): + total_count += 1 + is_correct = 1 if entry["true_false"] else 0 + total_correct += is_correct + + subject = entry["subject"] + question_type = entry["type"].lower() + if entry["category"]: + if subject == "Coding": + category_list = entry["category"].split(';') + 
for category in category_list: + category = category.strip() + category_stats[subject][category]["total"] += 1 + category_stats[subject][category]["correct"] += is_correct + else: + category = entry["category"] + category_stats[subject][category]["total"] += 1 + category_stats[subject][category]["correct"] += is_correct + if entry["task"]: + task = subject + '_' + entry["task"] + task_stats[task]["total"] += 1 + task_stats[task]["correct"] += is_correct + + subject_stats[subject]["total"] += 1 + subject_stats[subject]["correct"] += is_correct + + type_stats[question_type]["total"] += 1 + type_stats[question_type]["correct"] += is_correct + + + + average_accuracy = total_correct / total_count if total_count > 0 else 0 + logger.info(f"Average accuracy: {average_accuracy}") + + score = { + "average": { + "accuracy": average_accuracy, + "correct": total_correct, + "total": total_count + }, + "subject": { + subject: { + "accuracy": stats["correct"] / stats["total"] if stats["total"] > 0 else 0, + "correct": stats["correct"], + "total": stats["total"] + } for subject, stats in subject_stats.items() + }, + "question_type": { + question_type: { + "accuracy": stats["correct"] / stats["total"] if stats["total"] > 0 else 0, + "correct": stats["correct"], + "total": stats["total"] + } for question_type, stats in type_stats.items() + }, + "category": { + subject:{ + category: { + "accuracy": stats["correct"] / stats["total"] if stats["total"] > 0 else 0, + "correct": stats["correct"], + "total": stats["total"] + } for category, stats in categories.items() + }for subject, categories in category_stats.items() + }, + "task": { + task: { + "accuracy": stats["correct"] / stats["total"] if stats["total"] > 0 else 0, + "correct": stats["correct"], + "total": stats["total"] + } for task, stats in task_stats.items() + } + } + + with open(output_file, "w") as f: + f.write(json.dumps(score, indent=2)) + +def main(): + parser = argparse.ArgumentParser() + # output + parser.add_argument('--results_dir', type=str, default='') + args = parser.parse_args() + for root, dirs, files in os.walk(args.results_dir): + for file in files: + if file.endswith(".json") and not file.endswith("_result.json"): + gen_score(os.path.join(root, file), os.path.join(root, file).replace('.json', '_result.json')) + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOGLEVEL", "INFO").upper(), + format="[%(name)s] %(message)s", + datefmt="[%X]" + ) + logger_blocklist = [ + "asyncio", + "azure", + "azureml", + "datasets", + "httpx", + "httpcore", + "filelock", + "fsspec", + "msal", + "msrest", + "openai", + "PIL", + "urllib3", + ] + for module in logger_blocklist: + logging.getLogger(module).setLevel(logging.WARNING) + + main() \ No newline at end of file diff --git a/EMMA/evaluation/evaluate.py b/EMMA/evaluation/evaluate.py new file mode 100644 index 0000000000000000000000000000000000000000..e226383a3c4ed679dfca6512d797e482da4f17c4 --- /dev/null +++ b/EMMA/evaluation/evaluate.py @@ -0,0 +1,200 @@ +import argparse +import json +import logging +import os +from tqdm import tqdm +from .utils import * +import re +import time + + +def fast_extract_answer(response) : + response = response.strip() + response = process_answer(response) + # Direct Strategy Multi-Choice + # A / A: / A. 
+ for ch in 'ABCDEFGH': + if response.upper() == ch or response.startswith(f'{ch}:') or response.startswith(f'{ch}.'): + return ch + + # Direct Strategy Open-ended + # 1 + if is_number(response): + return response + + # CoT strategy + if 'boxed{' in response: + try: + model_answers = extract_full_boxed_content(response) + if model_answers: + # for coding + # \\boxed{\\text{}} + try: + text_content = re.findall(r'\\text{(.*?)}', model_answers[-1]) + if text_content: + return text_content[-1].strip() + except Exception: + pass + return model_answers[-1].strip() + except Exception: + pass + + # for Coding + # the correct answer is\n D. + for flag in ['final answer is', 'correct answer is', 'answer should be', 'answer is', 'answer:']: + if flag in response.lower(): + try: + model_answer = response.lower().split(flag)[-1].strip() + return model_answer.split('\n')[0].split('.')[0] + except Exception: + pass + + return "" + + +def create_test_prompt(score_prompt, problem, label): + score_prompt = score_prompt.strip() + response = problem[label] + answer = problem['answer'] + full_prompt = f'{score_prompt}\n' + f'Response: {response}\n' + f'Answer: {answer}\n' + 'Correct_or_not:' + return full_prompt + + +def call_gpt(client, model, user_prompt): + attempt = 0 + while attempt < 5: + try: + response = client.chat.completions.create( + model=model, + messages=[ + {"role": "user", "content": user_prompt} + ] + ) + return response.choices[0].message.content.strip() + except Exception as e: + logging.error(f"Attempt {attempt + 1} failed: {e}") + + if 'error' in str(e) and 'message' in str(e): + error_message = str(e) + if 'The server had an error processing your request.' in error_message: + sleep_time = 30 + logging.error(f"Server error, retrying in {sleep_time}s...") + time.sleep(sleep_time) + elif 'Please try again in ' in error_message: + sleep_time = float(error_message.split('Please try again in ')[1].split('s.')[0]) + logging.error(f"Rate limit exceeded, retrying in {sleep_time * 2}s...") + time.sleep(sleep_time * 2) + else: + print("Unknown error, skipping this request.") + break + attempt += 1 + + +def gen_true_false(answer_file, response_label='response', gpt_eval=False, model="", api_key="", rerun=True, save_every=20, logger=logging.getLogger(__name__)): + logger.info(f"Reading {answer_file}.....") + label = response_label + if gpt_eval: + from openai import OpenAI + client = OpenAI(api_key=api_key) + with open(answer_file, "r") as f: + results = json.load(f) + full_pids = list(results.keys()) + + skip_pids = [] + # for pid, problem in results.items(): + # flag = problem.get('true_false') + # if flag is not None: + # skip_pids.append(problem['pid']) + + if rerun: + test_pids = full_pids + else: + if len(skip_pids) > 0: + logger.info( + f"Found existing results file with {len(skip_pids)} problems with valid responses. Skipping these problems..." 
+ ) + test_pids = [pid for pid in full_pids if pid not in skip_pids] + + logger.info(f"Number of test problems to run: {len(test_pids)}") + + for i, pid in enumerate(tqdm(test_pids)): + problem = results[pid] + flag = False + if label not in problem or not problem[label]: + results[pid]['extraction'] = None + results[pid]['true_false'] = False + continue + + if gpt_eval: + user_prompt = create_test_prompt(score_demo_prompt, problem, label) + flag_cache = call_gpt(client, model, user_prompt) + results[pid]['gpt_eval'] = flag_cache + if flag_cache.lower() == 'correct': + flag = True + else: + flag = False + else: + model_answer = fast_extract_answer(problem[label]) + results[pid]['extraction'] = model_answer + if is_equal(model_answer, results[pid]['answer']) or is_equal(model_answer, results[pid]['gt_content']): + flag = True + + results[pid]['true_false'] = flag + + if (i % save_every == 0 and i > 0) or i == len(test_pids) - 1: + with open(answer_file, "w") as f: + f.write(json.dumps(results, indent=2)) + logger.info(f"Saved results to {answer_file}") + + with open(answer_file, "w") as f: + f.write(json.dumps(results, indent=2)) + logger.info(f"Saved results to {answer_file}") + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument('--results_dir', type=str, default='') + parser.add_argument('--response_label', type=str, default='response', help='response label for the input file') + parser.add_argument('--rerun', action='store_true', help='rerun the answer extraction') + parser.add_argument('--save_every', type=int, default=10, help='save every n problems') + + parser.add_argument('--gpt_eval', action='store_true', help='use gpt to evaluate') + parser.add_argument('--api_key', type=str, default="") + parser.add_argument('--model', type=str, default="chatgpt-4o-latest") + + args = parser.parse_args() + + logging.info("Starting to extract answers.......") + + for root, dirs, files in os.walk(args.results_dir): + for file in files: + if file.endswith(".json") and not file.endswith("_result.json"): + gen_true_false(os.path.join(root, file), args) + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOGLEVEL", "INFO").upper(), + format="[%(name)s] %(message)s", + datefmt="[%X]" + ) + logger_blocklist = [ + "asyncio", + "azure", + "azureml", + "datasets", + "httpx", + "httpcore", + "filelock", + "fsspec", + "msal", + "msrest", + "openai", + "PIL", + "urllib3", + ] + for module in logger_blocklist: + logging.getLogger(module).setLevel(logging.WARNING) + + main() \ No newline at end of file diff --git a/EMMA/evaluation/utils.py b/EMMA/evaluation/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a9879d7fe53f086ab57e217288ceaa99ff536e20 --- /dev/null +++ b/EMMA/evaluation/utils.py @@ -0,0 +1,108 @@ +from latex2sympy2 import latex2sympy +import re +from sympy import simplify +from word2number import w2n + + +def verify_extraction(extraction): + extraction = extraction.strip() + if extraction == "" or extraction == None: + return False + return True + + +def is_number(s): + try: + float(s) + return True + except ValueError: + return False + + +def process_answer(answer): + answer_pattern = re.compile(r'(.*?)') + answer = answer.split('### Final Answer ###')[-1].strip() if '### Final Answer ###' in answer else answer + answer = answer.split('Answer:')[-1].strip() if 'Answer:' in answer else answer + matches = re.findall(answer_pattern, answer) + answer = matches[-1] if matches else answer + return answer + +def 
extract_full_boxed_content(s): + """ + Extract the full content inside \boxed{}, handling nested braces {{}} properly. + """ + results = [] + + i = 0 + while i < len(s): + if s[i:i + 7] == r'\boxed{': + brace_stack = [] + start = i + 7 + i = start + + while i < len(s): + if s[i] == '{': + brace_stack.append(i) + elif s[i] == '}': + if brace_stack: + brace_stack.pop() + else: + results.append(s[start:i]) + break + i += 1 + i += 1 + + return results + + +def is_equal(md_ans, gt_ans): + + md_ans = md_ans.lower() + gt_ans = gt_ans.lower() + + if md_ans.strip() == gt_ans.strip(): + return True + + try: + md_ans_cache = str(w2n.word_to_num(md_ans)) + if md_ans_cache.strip() == gt_ans.strip(): + return True + except ValueError: + pass + + # For Math + try: + # Parse LaTeX expressions into sympy and compare numerical values + md_sympy = latex2sympy(md_ans) + gt_sympy = latex2sympy(gt_ans) + + # Compare evaluated results, rounded to 2 decimal places + if round(float(md_sympy.evalf()), 2) == round(float(gt_sympy.evalf()), 2): + return True + + # Additionally, compare simplified symbolic expressions + if simplify(md_sympy - gt_sympy) == 0: + return True + except Exception: + pass # Ignore parsing errors or evaluation failures + + return False + + +score_demo_prompt = """Please read the following example. Then determine whether the response is correct and type it +at the end of the prompt. It is worth noting that the final answer in the response is usually in \\boxed{}, +You only need to compare the final answer in the response with the answer, without considering the logical +correctness of the response itself. + +Response: The correct answer is:\n\nA + +Answer: A + +Correct_or_not: Correct + +Response: The correct option is:\n\n\\[\n\\boxed{E}\n\\] + +Answer: C + +Correct_or_not: Incorrect +""" \ No newline at end of file diff --git a/EMMA/generate_response.py b/EMMA/generate_response.py new file mode 100644 index 0000000000000000000000000000000000000000..efd335fffc81e6623b205aca209db1428076af98 --- /dev/null +++ b/EMMA/generate_response.py @@ -0,0 +1,194 @@ +import argparse +import json +import os +import logging +from tqdm import tqdm +from models.qwen import Qwen_vllm_Model +from datasets import load_dataset, concatenate_datasets +from data_utils import load_yaml, verify_response, build_query + +def do_generate(dataset_name, model_path, output_path, subject=['Math', 'Physics', 'Chemistry', 'Coding'], split='test', config_path='/user/konglingyu/VLMEvalKit/EMMA/configs/gpt.yaml', strategy='TrainCoT', save_every=20, rerun=False, greedy=0, max_tokens=4096, ngpu=1, logger=logging.getLogger(__name__), seed=42): + # Load Dataset + logger.info(f"Loading dataset {dataset_name}, subject: {subject}") + sub_dataset_list = [] + for subj in subject: + sub_dataset = load_dataset(dataset_name, subj, split=split) + sub_dataset_list.append(sub_dataset) + dataset = concatenate_datasets(sub_dataset_list) + + # Load Config + logger.info(f"Loading config") + config = load_yaml(config_path) + + # Load Model + # If we were given a custom path, load that model, otherwise use a remote service model + logger.info(f"Loading local model {model_path}") + device = 0 + world_size = 1 + try: + device = int(os.environ["LOCAL_RANK"]) + world_size = int(os.environ["WORLD_SIZE"]) + dist_keys = [ + "RANK", + "LOCAL_RANK", + "WORLD_SIZE", + "LOCAL_WORLD_SIZE", + "GROUP_RANK", + "ROLE_RANK", + "ROLE_NAME", + "OMP_NUM_THREADS", + "MASTER_ADDR", + "MASTER_PORT", + "TORCHELASTIC_USE_AGENT_STORE", + "TORCHELASTIC_MAX_RESTARTS", + 
"TORCHELASTIC_RUN_ID", + "TORCH_NCCL_ASYNC_ERROR_HANDLING", + "TORCHELASTIC_ERROR_FILE", + ] + + for dist_key in dist_keys: + del os.environ[dist_key] + except: + pass + + if world_size > 1: + assert ngpu==1 + + model = Qwen_vllm_Model(model_path, greedy=greedy, max_tokens=max_tokens, parallel=ngpu, seed=seed, device=device) + + logger.info(f"Model loaded!") + + if world_size > 1: + logger.info(f"Using distributed mode with {world_size} GPUs, device {device}") + output_path = output_path.replace('.json', f'_{device}.json') + else: + logger.info(f"Using single GPU mode") + logger.info(f"Output path: {output_path}") + + if os.path.exists(output_path): + logger.info("Results already exists.") + logger.info(f"Reading {output_path}") + with open(output_path, 'r') as f: + results = json.load(f) + else: + results = {} + + skip_pids = [] + if not rerun and results: + for pid, data in results.items(): + if 'response' in data and verify_response(data['response']): + skip_pids.append(pid) + + if len(skip_pids) > 0: + logger.info( + f"Found existing results file with {len(skip_pids)} problems with valid responses. Skipping these problems...") + + logger.info(f"Starting to generate.....") + for idx, sample in enumerate(tqdm(dataset)): + pid = sample['pid'] + if skip_pids and pid in skip_pids: + continue + if idx % world_size != device: + continue + sample = build_query(sample, config, strategy) + problem: dict = sample.copy() + for i in range(1, 6): + problem.pop('image_' + str(i)) + + try: + response = model.get_response(sample) + results[pid] = problem + results[pid]['response'] = response + except Exception as e: + logger.error(f"Error in generating answer for {pid}") + logger.error(e) + results[pid] = problem + results[pid]['error'] = str(e) + + if idx == 2 or (idx % save_every == 0 and idx > 0) or idx == len(dataset) - 1: + try: + with open(output_path, 'w') as f: + f.write(json.dumps(results, indent=2)) + logger.info(f"Save results to {output_path}") + except Exception as e: + logger.info(f"Error in saving {output_path}") + logger.info(e) + + with open(output_path, 'w') as f: + f.write(json.dumps(results, indent=2)) + logger.info(f"Save results to {output_path}") + + logger.info("End Generation......") + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('--dataset_name', type=str, default='/root/LMUData/EMMA-mini') + parser.add_argument('--subject', nargs='+', type=str, default=['Math', 'Physics', 'Chemistry', 'Coding']) + parser.add_argument('--split', type=str, default='test') + parser.add_argument('--strategy', type=str, default='CoT', choices=['CoT', 'Direct', 'TrainCoT']) + parser.add_argument('--config_path', type=str, default="configs/gpt.yaml") + parser.add_argument('--output_path', type=str, default='results/test-full.json') + parser.add_argument('--save_every', type=int, default=20, help='save every n problems') + parser.add_argument('--rerun', action='store_true', help='rerun the answer generation') + # Local model + parser.add_argument('--model_path', type=str, default='/user/konglingyu/ckpts/Qwen2-VL-7B', help="local model path or huggingface model name") + parser.add_argument('--max_tokens', type=int, default=4096) + parser.add_argument('--greedy', type=int, default=0) + parser.add_argument('--ngpu', type=int, default=1) + + args = parser.parse_args() + do_generate( + dataset_name=args.dataset_name, + model_path=args.model_path, + output_path=args.output_path, + subject=args.subject, + split=args.split, + config_path=args.config_path, + strategy=args.strategy, 
+ save_every=args.save_every, + rerun=args.rerun, + greedy=args.greedy, + max_tokens=args.max_tokens, + ngpu=args.ngpu + ) + + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOGLEVEL", "INFO").upper(), + format="[%(name)s] %(message)s", + datefmt="[%X]" + ) + logger_blocklist = [ + "asyncio", + "azure", + "azureml", + "datasets", + "httpx", + "httpcore", + "filelock", + "fsspec", + "msal", + "msrest", + "openai", + "PIL", + "urllib3", + ] + for module in logger_blocklist: + logging.getLogger(module).setLevel(logging.WARNING) + if not os.path.exists("/root/LMUData"): + os.symlink("/user/konglingyu/LMUData", "/root/LMUData") + main() + + + + + + + + + + + + diff --git a/EMMA/models/__init__.py b/EMMA/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/EMMA/models/claude.py b/EMMA/models/claude.py new file mode 100644 index 0000000000000000000000000000000000000000..b7866a16e5ec3ca82bac0b97ed1230e264d044e2 --- /dev/null +++ b/EMMA/models/claude.py @@ -0,0 +1,81 @@ +import logging +import re +import base64 +from io import BytesIO + +from anthropic import Anthropic + + +def encode_image_to_base64(image): + buffered = BytesIO() + image.save(buffered, format="PNG") + img_str = base64.b64encode(buffered.getvalue()).decode("utf-8") + return img_str + + +def create_message(sample): + query = sample['query'] + all_contents = [] + matches = re.findall(r"<(image_\d+)>", query) + split_text = re.split(r"", query) + for i, fragment in enumerate(split_text): + if fragment.strip(): + all_contents.extend([ + {"type": "text", "text": fragment} + ]) + if i < len(matches): + if sample[matches[i]]: + img_base64 = encode_image_to_base64(sample[matches[i]]) + all_contents.extend([ + { + "type": "image", + "source": { + "type": "base64", + "media_type": "image/png", + "data": img_base64 + } + } + ]) + else: + logging.error( + f"The image token {matches[i]} is in the query, but there is no corresponding image provided by the data") + + messages = [ + { + "role": "user", + "content": all_contents + } + ] + return messages + + +# build claude class +class Claude_Model(): + def __init__( + self, + client: Anthropic, + model="claude-3-5-sonnet-latest", + temperature=0, + max_tokens=1024 + ): + self.client = client + self.model = model + self.temperature = temperature + self.max_tokens = max_tokens + + def get_response(self, sample): + messages = create_message(sample) + try: + + v_response = self.client.messages.create( + model=self.model, + max_tokens=self.max_tokens, + temperature=self.temperature, + messages=messages + ) + response = v_response.content[0].text + + return response + except Exception as e: + print(e) + return None diff --git a/EMMA/models/gpt.py b/EMMA/models/gpt.py new file mode 100644 index 0000000000000000000000000000000000000000..29e71f5e10e7ab670c15a5e5c3f8761dab7cc5f4 --- /dev/null +++ b/EMMA/models/gpt.py @@ -0,0 +1,103 @@ +import logging +import re +import base64 +from io import BytesIO +import time + +from openai import OpenAI + + +def encode_image_to_base64(image): + buffered = BytesIO() + image.save(buffered, format="PNG") + img_str = base64.b64encode(buffered.getvalue()).decode("utf-8") + return img_str + + +def create_message(sample): + query = sample['query'] + all_contents = [] + matches = re.findall(r"<(image_\d+)>", query) + split_text = re.split(r"", query) + for i, fragment in enumerate(split_text): + if fragment.strip(): + all_contents.extend([ + {"type": "text", 
"text": fragment} + ]) + if i < len(matches): + if sample[matches[i]]: + img_base64 = encode_image_to_base64(sample[matches[i]]) + all_contents.extend([ + { + "type": "image_url", + "image_url": { + "url": f"data:image/png;base64,{img_base64}" + } + } + ]) + else: + logging.error( + f"The image token {matches[i]} is in the query, but there is no corresponding image provided by the data") + + messages = [ + { + "role": "user", + "content": all_contents + } + ] + return messages + + +# build gpt class +class GPT_Model: + def __init__( + self, + client: OpenAI, + model="chatgpt-4o-latest", + temperature=0, + max_tokens=1024, + retry_attempts = 5 + ): + self.client = client + self.model = model + self.temperature = temperature + self.max_tokens = max_tokens + self.retry_attempts = retry_attempts + + def get_response(self, sample): + attempt = 0 + messages = create_message(sample) + + while attempt < self.retry_attempts: + try: + response = self.client.chat.completions.create( + model=self.model, + messages=messages, + temperature=self.temperature, + max_tokens=self.max_tokens, + ) + + return response.choices[0].message.content.strip() + except Exception as e: + logging.error(f"Attempt {attempt + 1} failed: {e}") + + if 'error' in str(e) and 'message' in str(e): + error_message = str(e) + if 'The server had an error processing your request.' in error_message: + sleep_time = 30 + logging.error(f"Server error, retrying in {sleep_time}s...") + time.sleep(sleep_time) + elif 'Please try again in ' in error_message: + sleep_time = float(error_message.split('Please try again in ')[1].split('s.')[0]) + logging.error(f"Rate limit exceeded, retrying in {sleep_time * 2}s...") + time.sleep(sleep_time * 2) + elif 'RESOURCE_EXHAUSTED' in error_message: + sleep_time = 30 + logging.error(f"Gemini rate limit, retrying in {sleep_time}s...") + time.sleep(sleep_time) + else: + print("Unknown error, skipping this request.") + break + attempt += 1 + + return None diff --git a/EMMA/models/internvl.py b/EMMA/models/internvl.py new file mode 100644 index 0000000000000000000000000000000000000000..e8ec31716a1b633c784599721c13e063ba5b81dd --- /dev/null +++ b/EMMA/models/internvl.py @@ -0,0 +1,172 @@ +import re +import logging + +import torch +import torchvision.transforms as T +from torchvision.transforms.functional import InterpolationMode +from transformers import AutoModel, AutoTokenizer +import math + +IMAGENET_MEAN = (0.485, 0.456, 0.406) +IMAGENET_STD = (0.229, 0.224, 0.225) + + +def split_model(model_name): + device_map = {} + world_size = torch.cuda.device_count() + num_layers = { + 'InternVL2-1B': 24, 'InternVL2-2B': 24, 'InternVL2-4B': 32, 'InternVL2-8B': 32, + 'InternVL2-26B': 48, 'InternVL2-40B': 60, 'InternVL2-Llama3-76B': 80}[model_name] + # Since the first GPU will be used for ViT, treat it as half a GPU. 
+ num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5)) + num_layers_per_gpu = [num_layers_per_gpu] * world_size + num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5) + layer_cnt = 0 + for i, num_layer in enumerate(num_layers_per_gpu): + for j in range(num_layer): + device_map[f'language_model.model.layers.{layer_cnt}'] = i + layer_cnt += 1 + device_map['vision_model'] = 0 + device_map['mlp1'] = 0 + device_map['language_model.model.tok_embeddings'] = 0 + device_map['language_model.model.embed_tokens'] = 0 + device_map['language_model.output'] = 0 + device_map['language_model.model.norm'] = 0 + device_map['language_model.lm_head'] = 0 + device_map[f'language_model.model.layers.{num_layers - 1}'] = 0 + + return device_map + + +def build_transform(input_size): + MEAN, STD = IMAGENET_MEAN, IMAGENET_STD + transform = T.Compose([ + T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), + T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC), + T.ToTensor(), + T.Normalize(mean=MEAN, std=STD) + ]) + return transform + + +def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size): + best_ratio_diff = float('inf') + best_ratio = (1, 1) + area = width * height + for ratio in target_ratios: + target_aspect_ratio = ratio[0] / ratio[1] + ratio_diff = abs(aspect_ratio - target_aspect_ratio) + if ratio_diff < best_ratio_diff: + best_ratio_diff = ratio_diff + best_ratio = ratio + elif ratio_diff == best_ratio_diff: + if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]: + best_ratio = ratio + return best_ratio + + +def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False): + orig_width, orig_height = image.size + aspect_ratio = orig_width / orig_height + + # calculate the existing image aspect ratio + target_ratios = set( + (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if + i * j <= max_num and i * j >= min_num) + target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1]) + + # find the closest aspect ratio to the target + target_aspect_ratio = find_closest_aspect_ratio( + aspect_ratio, target_ratios, orig_width, orig_height, image_size) + + # calculate the target width and height + target_width = image_size * target_aspect_ratio[0] + target_height = image_size * target_aspect_ratio[1] + blocks = target_aspect_ratio[0] * target_aspect_ratio[1] + + # resize the image + resized_img = image.resize((target_width, target_height)) + processed_images = [] + for i in range(blocks): + box = ( + (i % (target_width // image_size)) * image_size, + (i // (target_width // image_size)) * image_size, + ((i % (target_width // image_size)) + 1) * image_size, + ((i // (target_width // image_size)) + 1) * image_size + ) + # split the image + split_img = resized_img.crop(box) + processed_images.append(split_img) + assert len(processed_images) == blocks + if use_thumbnail and len(processed_images) != 1: + thumbnail_img = image.resize((image_size, image_size)) + processed_images.append(thumbnail_img) + return processed_images + + +def load_image(image, input_size=448, max_num=12): + transform = build_transform(input_size=input_size) + images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num) + pixel_values = [transform(image) for image in images] + pixel_values = torch.stack(pixel_values) + return pixel_values + + +def process_query(sample): + query = sample['query'] + matches = re.findall(r"<(image_\d+)>", query) 
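+    # Separate the text from its <image_k> references: the placeholders are removed from the query and the images are collected in order for model.chat().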
+ modified_query = re.sub(r"", "", query) + images = [] + for match in matches: + if sample[match]: + images.append(sample[match]) + else: + logging.error(f"The image token <{match}> is in the query, but there is no corresponding image provided by the data") + return modified_query, images + + +class Internvl_Model: + def __init__( + self, + model_path, + temperature=0, + max_tokens=1024 + ): + self.temperature = temperature + self.max_tokens = max_tokens + self.device_map = split_model('InternVL2-Llama3-76B') + self.model = AutoModel.from_pretrained( + model_path, + torch_dtype=torch.bfloat16, + low_cpu_mem_usage=True, + use_flash_attn=True, + trust_remote_code=True, + device_map=self.device_map).eval() + self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, use_fast=False) + + def get_response(self, sample): + model = self.model + tokenizer = self.tokenizer + + try: + query, images = process_query(sample) + pixel_values_list = [] + num_patches_list = [] + + for image in images: + pixel_value = load_image(image, max_num=12).to(torch.bfloat16).cuda() + pixel_values_list.append(pixel_value) + + num_patches_list.append(pixel_value.size(0)) + + pixel_values = torch.cat(pixel_values_list, dim=0) + + generation_config = dict(max_new_tokens=self.max_tokens, do_sample=True, temperature=self.temperature) + + # single-image single-round conversation + response = model.chat(tokenizer, pixel_values, query, generation_config, + num_patches_list=num_patches_list) + return response + except Exception as e: + print(e) + return None diff --git a/EMMA/models/llava.py b/EMMA/models/llava.py new file mode 100644 index 0000000000000000000000000000000000000000..8fab08bf3f5cdb4729e511e7737443d4ee677ed0 --- /dev/null +++ b/EMMA/models/llava.py @@ -0,0 +1,82 @@ +import re +import logging + +import torch +from transformers import AutoProcessor, LlavaOnevisionForConditionalGeneration + +def create_message(sample): + query = sample['query'] + all_contents = [] + matches = re.findall(r"<(image_\d+)>", query) + split_text = re.split(r"", query) + images = [] + for i, fragment in enumerate(split_text): + if fragment.strip(): + all_contents.extend([ + {"type": "text", "text": fragment} + ]) + if i < len(matches): + if sample[matches[i]]: + all_contents.extend([ + {"type": "image"} + ]) + images.append(sample[matches[i]]) + else: + logging.error( + f"The image token {matches[i]} is in the query, but there is no corresponding image provided by the data") + messages = [ + { + "role": "user", + "content": all_contents + } + ] + return messages, images + + +class Llava_Model: + def __init__( + self, + model_path, + temperature=0, + max_tokens=1024 + ): + self.temperature = temperature + self.max_tokens = max_tokens + self.model = LlavaOnevisionForConditionalGeneration.from_pretrained( + model_path, + torch_dtype=torch.float16, + device_map="auto", + use_flash_attention_2=True + ) + self.processor = AutoProcessor.from_pretrained(model_path) + + + def get_response(self, sample): + + model = self.model + processor = self.processor + + try: + messages, images = create_message(sample) + + input_text = processor.apply_chat_template(messages, add_generation_prompt=True) + inputs = processor( + images=images, + text=input_text, + add_special_tokens=False, + return_tensors="pt" + ).to(model.device, torch.float16) + + output = model.generate(**inputs, do_sample=True, temperature=self.temperature, max_new_tokens=self.max_tokens) + response = processor.decode(output[0], skip_special_tokens=True) + + 
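+            # The decoded output still contains the prompt, so keep only the text after the "assistant" turn marker.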
assistant_index = response.find("assistant") + if assistant_index != -1: + final_answer = response[assistant_index + len("assistant"):].strip() + else: + final_answer = response.strip() + return final_answer + + except Exception as e: + print(e) + return None \ No newline at end of file diff --git a/EMMA/models/qwen.py b/EMMA/models/qwen.py new file mode 100644 index 0000000000000000000000000000000000000000..fdd85a40d42dd2ef1974ac5df792dede106e2151 --- /dev/null +++ b/EMMA/models/qwen.py @@ -0,0 +1,212 @@ +import re +import logging +import base64 +from io import BytesIO +import os + +from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor +from qwen_vl_utils import process_vision_info +import torch +from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor +from vllm import LLM, SamplingParams + +def encode_image_to_base64(image): + buffered = BytesIO() + image.save(buffered, format="PNG") + img_str = base64.b64encode(buffered.getvalue()).decode("utf-8") + return img_str + +def create_message(sample): + query = sample['query'] + all_contents = [] + matches = re.findall(r"<(image_\d+)>", query) + split_text = re.split(r"", query) + for i, fragment in enumerate(split_text): + if fragment.strip(): + all_contents.extend([ + {"type": "text", "text": fragment} + ]) + if i < len(matches): + if sample[matches[i]]: + img_base64 = encode_image_to_base64(sample[matches[i]]) + all_contents.extend([ + { + "type": "image", + "image": f"data:image/png;base64,{img_base64}" + } + ]) + else: + logging.error( + f"The image token {matches[i]} is in the query, but there is no corresponding image provided by the data") + + messages = [ + { + "role": "user", + "content": all_contents + } + ] + return messages + +class Qwen_Model: + def __init__( + self, + model_path, + temperature=0, + max_tokens=1024 + ): + self.model_path = model_path + self.temperature = temperature + self.max_tokens = max_tokens + self.model = Qwen2VLForConditionalGeneration.from_pretrained(self.model_path, torch_dtype=torch.bfloat16, + attn_implementation="flash_attention_2", + device_map="auto", ) + self.processor = AutoProcessor.from_pretrained(self.model_path) + + + def get_response(self, sample): + + model = self.model + processor = self.processor + + try: + messages = create_message(sample) + + text = processor.apply_chat_template( + messages, tokenize=False, add_generation_prompt=True, add_vision_id=True + ) + image_inputs, video_inputs = process_vision_info(messages) + inputs = processor( + text=[text], + images=image_inputs, + videos=video_inputs, + padding=True, + return_tensors="pt", + ) + inputs = inputs.to("cuda") + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=self.max_tokens, temperature=self.temperature) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + response = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return response[0] + except Exception as e: + print(e) + return None + + + +class Qwen2_5_Model: + def __init__( + self, + model_path="Qwen/Qwen2.5-VL-72B-Instruct", + temperature=0, + max_tokens=1024 + ): + self.model_path = model_path + self.temperature = temperature + self.max_tokens = max_tokens + + self.model = Qwen2_5_VLForConditionalGeneration.from_pretrained( + self.model_path, + torch_dtype=torch.bfloat16, + attn_implementation="flash_attention_2", + device_map="auto" + ) + + 
self.processor = AutoProcessor.from_pretrained(self.model_path) + + + def get_response(self, sample): + + model = self.model + processor = self.processor + + try: + messages = create_message(sample) + + text = processor.apply_chat_template( + messages, tokenize=False, add_generation_prompt=True, add_vision_id=True + ) + image_inputs, video_inputs = process_vision_info(messages) + inputs = processor( + text=[text], + images=image_inputs, + videos=video_inputs, + padding=True, + return_tensors="pt", + ) + inputs = inputs.to("cuda") + + # Inference: Generation of the output + generated_ids = model.generate(**inputs, max_new_tokens=self.max_tokens, temperature=self.temperature) + generated_ids_trimmed = [ + out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids) + ] + response = processor.batch_decode( + generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False + ) + + return response[0] + except Exception as e: + print(e) + return None + +class Qwen_vllm_Model: + def __init__( + self, + model_path, + greedy=1, + max_tokens=1024, + parallel=1, + seed=42, + device=0 + ): + self.model_path = model_path + self.max_tokens = max_tokens + + self.model = LLM( + model=model_path, + enable_prefix_caching=True, + trust_remote_code=True, + limit_mm_per_prompt={"image": 8, "video": 1}, + tensor_parallel_size=parallel, + device=device + ) + self.sampling_params = SamplingParams( + temperature=0 if greedy else 1, + top_p=0.001 if greedy else 1, + top_k=1 if greedy else -1, + repetition_penalty=1, + max_tokens=max_tokens, + stop_token_ids=[], + seed=seed + ) + self.processor = AutoProcessor.from_pretrained(self.model_path) + + + def get_response(self, sample): + try: + messages = create_message(sample) + + text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) + image_inputs, _ = process_vision_info([messages]) + inputs = { + "prompt": text, + "multi_modal_data": {'image': image_inputs}, + } + + out = self.model.generate( + inputs, + sampling_params=self.sampling_params, + use_tqdm=False + ) + response = out[0].outputs[0].text + return response + except Exception as e: + print(e) + return None \ No newline at end of file diff --git a/EMMA/scripts/evaluation_fast.sh b/EMMA/scripts/evaluation_fast.sh new file mode 100644 index 0000000000000000000000000000000000000000..6efab4198be24fc6764326dbc96ecbed259d02d8 --- /dev/null +++ b/EMMA/scripts/evaluation_fast.sh @@ -0,0 +1,4 @@ +python evaluate.py \ +--results_dir 'path_to_your_results_dir' \ +--response_label 'response' \ +--save_every 20 diff --git a/EMMA/scripts/evaluation_llm.sh b/EMMA/scripts/evaluation_llm.sh new file mode 100644 index 0000000000000000000000000000000000000000..53c9bebd29d840bdd3110ac9ceccc021382b1f8f --- /dev/null +++ b/EMMA/scripts/evaluation_llm.sh @@ -0,0 +1,7 @@ +python evaluate.py \ +--results_dir 'path_to_your_results_dir' \ +--response_label 'response' \ +--save_every 20 \ +--gpt_eval \ +--api_key '' \ +--model 'chatgpt-4o-latest' diff --git a/EMMA/scripts/run_closesource.sh b/EMMA/scripts/run_closesource.sh new file mode 100644 index 0000000000000000000000000000000000000000..88add0cc9f25866eebb56d8779bdd11f6eae1de7 --- /dev/null +++ b/EMMA/scripts/run_closesource.sh @@ -0,0 +1,47 @@ +#!/bin/bash + python generate_response.py \ + --dataset_name 'luckychao/EMMA' \ + --split 'test' \ + --subject 'Math' 'Physics' 'Chemistry' 'Coding' \ + --strategy 'CoT' \ + --config_path 'configs/gpt.yaml' \ + --model 'remote-model-name' \ + --api_key '' \ + 
--output_path 'path_to_output_file' \ + --max_tokens 4096 \ + --temperature 0 \ + --save_every 20 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/EMMA/scripts/run_opensource.sh b/EMMA/scripts/run_opensource.sh new file mode 100644 index 0000000000000000000000000000000000000000..e82b55e171df896d00a07f486a2fe8e22b3af654 --- /dev/null +++ b/EMMA/scripts/run_opensource.sh @@ -0,0 +1,46 @@ +#!/bin/bash + python generate_response.py \ + --dataset_name 'luckychao/EMMA' \ + --split 'test' \ + --subject 'Math' 'Physics' 'Chemistry' 'Coding' \ + --strategy 'CoT' \ + --config_path 'configs/gpt.yaml' \ + --model_path 'path_to_your_local_model' \ + --output_path 'path_to_output_file' \ + --max_tokens 4096 \ + --temperature 0.7 \ + --save_every 20 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..d67ab032b78b96a9ac3fa03cfe62baf4d78b61dc --- /dev/null +++ b/LICENSE @@ -0,0 +1,203 @@ +Copyright 2023 VLMEvalKit Authors. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023 VLMEvalKit Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7bd748c81e79dbd24c512ecd72967a94c47f43a3 --- /dev/null +++ b/README.md @@ -0,0 +1,149 @@ +![LOGO](http://opencompass.openxlab.space/utils/MMLB.jpg) + +A Toolkit for Evaluating Large Vision-Language Models. + +[![][github-contributors-shield]][github-contributors-link] • [![][github-forks-shield]][github-forks-link] • [![][github-stars-shield]][github-stars-link] • [![][github-issues-shield]][github-issues-link] • [![][github-license-shield]][github-license-link] + +English | [简体中文](/docs/zh-CN/README_zh-CN.md) | [日本語](/docs/ja/README_ja.md) + +🏆 OC Learderboard • +🏗️Quickstart • +📊Datasets & Models • +🛠️Development + +🤗 HF Leaderboard • +🤗 Evaluation Records • +🤗 HF Video Leaderboard • + +🔊 Discord • +📝 Report • +🎯Goal • +🖊️Citation + + +**VLMEvalKit** (the python package name is **vlmeval**) is an **open-source evaluation toolkit** of **large vision-language models (LVLMs)**. 
It enables **one-command evaluation** of LVLMs on various benchmarks, without the heavy workload of data preparation under multiple repositories. In VLMEvalKit, we adopt **generation-based evaluation** for all LVLMs, and provide the evaluation results obtained with both **exact matching** and **LLM-based answer extraction**. + +## 🆕 News + +> We have presented a [**comprehensive survey**](https://arxiv.org/pdf/2411.15296) on the evaluation of large multi-modality models, jointly with [**MME Team**](https://github.com/BradyFU/Awesome-Multimodal-Large-Language-Models) and [**LMMs-Lab**](https://lmms-lab.github.io) 🔥🔥🔥 +- **[2025-02-20]** Supported Models: **InternVL2.5 series, QwenVL2.5 series, QVQ-72B, Doubao-VL, Janus-Pro-7B, MiniCPM-o-2.6, InternVL2-MPO, LLaVA-CoT, Hunyuan-Standard-Vision, Ovis2, Valley, SAIL-VL, Ross, Long-VITA, EMU3, SmolVLM**. Supported Benchmarks: **MMMU-Pro, WeMath, 3DSRBench, LogicVista, VL-RewardBench, CC-OCR, CG-Bench, CMMMU, WorldSense**. Please refer to [**VLMEvalKit Features**](https://aicarrier.feishu.cn/wiki/Qp7wwSzQ9iK1Y6kNUJVcr6zTnPe?table=tblsdEpLieDoCxtb) for more details. Thanks to all contributors 🔥🔥🔥 +- **[2024-12-11]** Supported [**NaturalBench**](https://huggingface.co/datasets/BaiqiL/NaturalBench), a vision-centric VQA benchmark (NeurIPS'24) that challenges vision-language models with simple questions about natural imagery. +- **[2024-12-02]** Supported [**VisOnlyQA**](https://github.com/psunlpgroup/VisOnlyQA/), a benchmark for evaluating the visual perception capabilities 🔥🔥🔥 +- **[2024-11-26]** Supported [**Ovis1.6-Gemma2-27B**](https://huggingface.co/AIDC-AI/Ovis1.6-Gemma2-27B), thanks to [**runninglsy**](https://github.com/runninglsy) 🔥🔥🔥 +- **[2024-11-25]** Create a new flag `VLMEVALKIT_USE_MODELSCOPE`. By setting this environment variable, you can download the video benchmarks supported from [**modelscope**](https://www.modelscope.cn) 🔥🔥🔥 +- **[2024-11-25]** Supported [**VizWiz**](https://vizwiz.org/tasks/vqa/) benchmark 🔥🔥🔥 +- **[2024-11-22]** Supported the inference of [**MMGenBench**](https://mmgenbench.alsoai.com), thanks [**lerogo**](https://github.com/lerogo) 🔥🔥🔥 +- **[2024-11-22]** Supported [**Dynamath**](https://huggingface.co/datasets/DynaMath/DynaMath_Sample), a multimodal math benchmark comprising of 501 SEED problems and 10 variants generated based on random seeds. The benchmark can be used to measure the robustness of MLLMs in multi-modal math solving 🔥🔥🔥 +- **[2024-11-21]** Integrated a new config system to enable more flexible evaluation settings. Check the [Document](/docs/en/ConfigSystem.md) or run `python run.py --help` for more details 🔥🔥🔥 +- **[2024-11-21]** Supported [**QSpatial**](https://andrewliao11.github.io/spatial_prompt/), a multimodal benchmark for Quantitative Spatial Reasoning (determine the size / distance, e.g.), thanks [**andrewliao11**](https://github.com/andrewliao11) for providing the official support 🔥🔥🔥 +- **[2024-11-21]** Supported [**MM-Math**](https://github.com/kge-sun/mm-math), a new multimodal math benchmark comprising of ~6K middle school multi-modal reasoning math problems. GPT-4o-20240806 achieces 22.5% accuracy on this benchmark 🔥🔥🔥 + +## 🏗️ QuickStart + +See [[QuickStart](/docs/en/Quickstart.md) | [快速开始](/docs/zh-CN/Quickstart.md)] for a quick start guide. 
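+If you only want to confirm that the package imports correctly and browse the registered model identifiers, the snippet below is a minimal sketch (it assumes an editable install such as `pip install -e .`, and the model key used is just an example taken from the demo further down in this README):
+
+```python
+# Minimal sanity check (illustrative): list a few registered model names and
+# construct one of them. Replace the key with any entry from vlmeval/config.py.
+from vlmeval.config import supported_VLM
+
+print(sorted(supported_VLM)[:10])               # browse available model identifiers
+model = supported_VLM['idefics_9b_instruct']()  # same constructor pattern as the demo below
+```
+
+These keys are also the names that `run.py --model` expects on the command line.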
+ +## 📊 Datasets, Models, and Evaluation Results + +### Evaluation Results + +**The performance numbers on our official multi-modal leaderboards can be downloaded from here!** + +[**OpenVLM Leaderboard**](https://huggingface.co/spaces/opencompass/open_vlm_leaderboard): [**Download All DETAILED Results**](http://opencompass.openxlab.space/assets/OpenVLM.json). + +Check **Supported Benchmarks** Tab in [**VLMEvalKit Features**](https://aicarrier.feishu.cn/wiki/Qp7wwSzQ9iK1Y6kNUJVcr6zTnPe?table=tblsdEpLieDoCxtb) to view all supported image & video benchmarks (70+). + +Check **Supported LMMs** Tab in [**VLMEvalKit Features**](https://aicarrier.feishu.cn/wiki/Qp7wwSzQ9iK1Y6kNUJVcr6zTnPe?table=tblsdEpLieDoCxtb) to view all supported LMMs, including commercial APIs, open-source models, and more (200+). + +**Transformers Version Recommendation:** + +Note that some VLMs may not be able to run under certain transformer versions, we recommend the following settings to evaluate each VLM: + +- **Please use** `transformers==4.33.0` **for**: `Qwen series`, `Monkey series`, `InternLM-XComposer Series`, `mPLUG-Owl2`, `OpenFlamingo v2`, `IDEFICS series`, `VisualGLM`, `MMAlaya`, `ShareCaptioner`, `MiniGPT-4 series`, `InstructBLIP series`, `PandaGPT`, `VXVERSE`. +- **Please use** `transformers==4.36.2` **for**: `Moondream1`. +- **Please use** `transformers==4.37.0` **for**: `LLaVA series`, `ShareGPT4V series`, `TransCore-M`, `LLaVA (XTuner)`, `CogVLM Series`, `EMU2 Series`, `Yi-VL Series`, `MiniCPM-[V1/V2]`, `OmniLMM-12B`, `DeepSeek-VL series`, `InternVL series`, `Cambrian Series`, `VILA Series`, `Llama-3-MixSenseV1_1`, `Parrot-7B`, `PLLaVA Series`. +- **Please use** `transformers==4.40.0` **for**: `IDEFICS2`, `Bunny-Llama3`, `MiniCPM-Llama3-V2.5`, `360VL-70B`, `Phi-3-Vision`, `WeMM`. +- **Please use** `transformers==4.42.0` **for**: `AKI`. +- **Please use** `transformers==4.44.0` **for**: `Moondream2`, `H2OVL series`. +- **Please use** `transformers==4.45.0` **for**: `Aria`. +- **Please use** `transformers==latest` **for**: `LLaVA-Next series`, `PaliGemma-3B`, `Chameleon series`, `Video-LLaVA-7B-HF`, `Ovis series`, `Mantis series`, `MiniCPM-V2.6`, `OmChat-v2.0-13B-sinlge-beta`, `Idefics-3`, `GLM-4v-9B`, `VideoChat2-HD`, `RBDash_72b`, `Llama-3.2 series`, `Kosmos series`. + +**Torchvision Version Recommendation:** + +Note that some VLMs may not be able to run under certain torchvision versions, we recommend the following settings to evaluate each VLM: + +- **Please use** `torchvision>=0.16` **for**: `Moondream series` and `Aria` + +**Flash-attn Version Recommendation:** + +Note that some VLMs may not be able to run under certain flash-attention versions, we recommend the following settings to evaluate each VLM: + +- **Please use** `pip install flash-attn --no-build-isolation` **for**: `Aria` + +```python +# Demo +from vlmeval.config import supported_VLM +model = supported_VLM['idefics_9b_instruct']() +# Forward Single Image +ret = model.generate(['assets/apple.jpg', 'What is in this image?']) +print(ret) # The image features a red apple with a leaf on it. +# Forward Multiple Images +ret = model.generate(['assets/apple.jpg', 'assets/apple.jpg', 'How many apples are there in the provided images? ']) +print(ret) # There are two apples in the provided images. +``` + +## 🛠️ Development Guide + +To develop custom benchmarks, VLMs, or simply contribute other codes to **VLMEvalKit**, please refer to [[Development_Guide](/docs/en/Development.md) | [开发指南](/docs/zh-CN/Development.md)]. 
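+As a rough orientation before reading the guide: a new model wrapper mainly needs a `generate_inner` method that maps a multi-modal message to a string response. The sketch below is illustrative only; the class name and echoed answer are placeholders, and it assumes the `BaseModel` helper in `vlmeval/vlm/base.py` (which provides `message_to_promptimg`). See the Development Guide for the exact requirements.
+
+```python
+# Illustrative sketch of a custom VLM wrapper, not a real model.
+# Assumes the BaseModel helper class in vlmeval/vlm/base.py.
+from vlmeval.vlm.base import BaseModel
+
+
+class MyToyVLM(BaseModel):
+    INTERLEAVE = False  # this toy wrapper only uses the first image + concatenated text
+
+    def generate_inner(self, message, dataset=None):
+        # `message` is an interleaved list such as:
+        # [dict(type='image', value='/path/to/img.jpg'), dict(type='text', value='Describe it.')]
+        prompt, image_path = self.message_to_promptimg(message, dataset=dataset)
+        # A real implementation would run model inference here; we simply echo the inputs.
+        return f'(toy answer for {image_path}) {prompt}'
+```
+
+How such a wrapper is registered and how dataset-specific prompts are built is covered step by step in the guide.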
+ +**Call for contributions** + +To promote the contribution from the community and share the corresponding credit (in the next report update): + +- All Contributions will be acknowledged in the report. +- Contributors with 3 or more major contributions (implementing an MLLM, benchmark, or major feature) can join the author list of [VLMEvalKit Technical Report](https://www.arxiv.org/abs/2407.11691) on ArXiv. Eligible contributors can create an issue or dm kennyutc in [VLMEvalKit Discord Channel](https://discord.com/invite/evDT4GZmxN). + +Here is a [contributor list](/docs/en/Contributors.md) we curated based on the records. + +## 🎯 The Goal of VLMEvalKit + +**The codebase is designed to:** + +1. Provide an **easy-to-use**, **opensource evaluation toolkit** to make it convenient for researchers & developers to evaluate existing LVLMs and make evaluation results **easy to reproduce**. +2. Make it easy for VLM developers to evaluate their own models. To evaluate the VLM on multiple supported benchmarks, one just need to **implement a single `generate_inner()` function**, all other workloads (data downloading, data preprocessing, prediction inference, metric calculation) are handled by the codebase. + +**The codebase is not designed to:** + +1. Reproduce the exact accuracy number reported in the original papers of all **3rd party benchmarks**. The reason can be two-fold: + 1. VLMEvalKit uses **generation-based evaluation** for all VLMs (and optionally with **LLM-based answer extraction**). Meanwhile, some benchmarks may use different approaches (SEEDBench uses PPL-based evaluation, *eg.*). For those benchmarks, we compare both scores in the corresponding result. We encourage developers to support other evaluation paradigms in the codebase. + 2. By default, we use the same prompt template for all VLMs to evaluate on a benchmark. Meanwhile, **some VLMs may have their specific prompt templates** (some may not covered by the codebase at this time). We encourage VLM developers to implement their own prompt template in VLMEvalKit, if that is not covered currently. That will help to improve the reproducibility. + +## 🖊️ Citation + +If you find this work helpful, please consider to **star🌟** this repo. Thanks for your support! + +[![Stargazers repo roster for @open-compass/VLMEvalKit](https://reporoster.com/stars/open-compass/VLMEvalKit)](https://github.com/open-compass/VLMEvalKit/stargazers) + +If you use VLMEvalKit in your research or wish to refer to published OpenSource evaluation results, please use the following BibTeX entry and the BibTex entry corresponding to the specific VLM / benchmark you used. + +```bib +@inproceedings{duan2024vlmevalkit, + title={Vlmevalkit: An open-source toolkit for evaluating large multi-modality models}, + author={Duan, Haodong and Yang, Junming and Qiao, Yuxuan and Fang, Xinyu and Chen, Lin and Liu, Yuan and Dong, Xiaoyi and Zang, Yuhang and Zhang, Pan and Wang, Jiaqi and others}, + booktitle={Proceedings of the 32nd ACM International Conference on Multimedia}, + pages={11198--11201}, + year={2024} +} +``` + +


+ +[github-contributors-link]: https://github.com/open-compass/VLMEvalKit/graphs/contributors +[github-contributors-shield]: https://img.shields.io/github/contributors/open-compass/VLMEvalKit?color=c4f042&labelColor=black&style=flat-square +[github-forks-link]: https://github.com/open-compass/VLMEvalKit/network/members +[github-forks-shield]: https://img.shields.io/github/forks/open-compass/VLMEvalKit?color=8ae8ff&labelColor=black&style=flat-square +[github-issues-link]: https://github.com/open-compass/VLMEvalKit/issues +[github-issues-shield]: https://img.shields.io/github/issues/open-compass/VLMEvalKit?color=ff80eb&labelColor=black&style=flat-square +[github-license-link]: https://github.com/open-compass/VLMEvalKit/blob/main/LICENSE +[github-license-shield]: https://img.shields.io/github/license/open-compass/VLMEvalKit?color=white&labelColor=black&style=flat-square +[github-stars-link]: https://github.com/open-compass/VLMEvalKit/stargazers +[github-stars-shield]: https://img.shields.io/github/stars/open-compass/VLMEvalKit?color=ffcb47&labelColor=black&style=flat-square diff --git a/assets/LOGO.svg b/assets/LOGO.svg new file mode 100644 index 0000000000000000000000000000000000000000..39b62268249b39b1daf1c6e2ab4f0f214051b59c --- /dev/null +++ b/assets/LOGO.svg @@ -0,0 +1,24 @@ + + + +Created with Fabric.js 5.3.0 + + + + + + + + + + + + + VLMEvalKit + \ No newline at end of file diff --git a/do_eval.py b/do_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..570eaf3bc38e18c41d61d62ce553934b607927d6 --- /dev/null +++ b/do_eval.py @@ -0,0 +1,144 @@ +import argparse +import json +import os +from datetime import datetime +import subprocess + +# the emprical settings for each dataset +full_datasets = { + "MathVista_MINI": "train_prompt_sampling", + "MathVision": "train_prompt_greedy", + "MathVerse_MINI": "train_prompt_greedy", + "MMMU_DEV_VAL": "origin_prompt_greedy", + "MMStar": "train_prompt_greedy", + "DynaMath": "train_prompt_greedy", + "WeMath": "train_prompt_greedy", + "TextVQA_VAL": "origin_prompt_greedy", + "DocVQA_TEST": "origin_prompt_greedy", + "MMVet": "origin_prompt_greedy", +} + +settings = { + "train_prompt_sampling": { + "use_reasoning_prompt": 2, + "do_sample": True, + "top_p": 1, + "top_k": -1, + "temperature": 1, + }, + "train_prompt_greedy": { + "use_reasoning_prompt": 2, + "do_sample": True, + "top_p": 0.001, + "top_k": 1, + "temperature": 0.01, + }, + "origin_prompt_greedy": { + "use_reasoning_prompt": 0, + "do_sample": True, + "top_p": 0.001, + "top_k": 1, + "temperature": 0.01, + }, +} + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument("--run_name", type=str, required=True, help="Name of the run") + parser.add_argument("--gpus", type=int, default=8, help="Number of GPUs to use") + parser.add_argument("--path", type=str, required=True, help="Path to the model") + parser.add_argument( + "--dataset", type=str, nargs="+", required=True, help="List of datasets to use" + ) + + parser.add_argument( + "--min_pixels", type=int, default=3136, help="Minimum number of pixels" + ) + parser.add_argument( + "--max_pixels", type=int, default=12845056, help="Maximum number of pixels" + ) + parser.add_argument( + "--max_new_tokens", type=int, default=2048, help="Maximum number of new tokens" + ) + + args = parser.parse_args() + assert len(args.dataset), "--dataset should be a list of datasets" + + datasets = args.dataset + if len(args.dataset) == 1 and args.dataset[0] == "full": + datasets = list(full_datasets.keys()) + + for dataset in 
datasets: + assert ( + dataset in full_datasets + ), f"Dataset {dataset} is not in the list of available datasets: {list(full_datasets.keys())}" + + print("Datasets to be used:", datasets) + print("Run name:", args.run_name) + print("Number of GPUs:", args.gpus) + print("Model path:", args.path) + + for dataset in datasets: + config = { + "model": { + args.run_name: { + "class": "Qwen2VLChat", + "model_path": args.path, + "min_pixels": args.min_pixels, + "max_pixels": args.max_pixels, + "use_vllm": True, + "max_new_tokens": args.max_new_tokens, + **settings[full_datasets[dataset]], + }, + }, + "datasets": datasets, + } + + current_datetime = datetime.now().strftime("%Y%m%d") + save_dir = f"public_eval/{args.run_name}/{dataset}/{current_datetime}" + os.makedirs(save_dir, exist_ok=True) + + config_name = f"config.json" + config_path = os.path.join(save_dir, config_name) + with open(config_path, "w") as json_file: + json.dump(config, json_file, indent=4) + + print(f"Start evaluating on {dataset}.") + print(f"Eval config {full_datasets[dataset]}") + + env_vars = os.environ.copy() + env_vars["VLLM_USE_V1"] = "0" + + command = [ + "torchrun", + f"--nproc_per_node={args.gpus}", + "run_for_bash.py", + "--config", + f"{config_path}", + "--data", + f"{dataset}", + "--verbose", + "--work-dir", + f"{save_dir}", + ] + + stdout_file = os.path.join(save_dir, f"{dataset}_stdout.log") + stderr_file = os.path.join(save_dir, f"{dataset}_stderr.log") + + with open(stdout_file, "w") as stdout, open(stderr_file, "w") as stderr: + try: + print(f"Output redirected to {stdout_file}") + print(f"Errors redirected to {stderr_file}") + subprocess.run( + command, env=env_vars, check=True, stdout=stdout, stderr=stderr + ) + # os.symlink(source, link_name) + + except subprocess.CalledProcessError as e: + print(f"torchrun failed. 
Check {stderr_file} for error details.") + + +if __name__ == "__main__": + main() diff --git a/do_eval.sh b/do_eval.sh new file mode 100644 index 0000000000000000000000000000000000000000..5532b4b03aa1b8f48dbde051f47ea592ffccbebb --- /dev/null +++ b/do_eval.sh @@ -0,0 +1,17 @@ +cd /user/konglingyu +source venv/tabfact/bin/activate +cd VLMEvalKit + +CUDA_VISIBLE_DEVICES=0 python do_eval_temp.py --run_name NEW_naive_grpo_step_400 --gpus 8 --path /user/xuqixin/checkpoints/qwen25_vl-7b/grpo_v7_exp8_qwen25vl_grpo_opensource_math_doc_vanilla_grpo/global_step_400/actor/huggingface --dataset EMMA-mini & + +CUDA_VISIBLE_DEVICES=1 python do_eval_temp.py --run_name NEW_grpo_v7_exp0_qwen25vl_grpo_opensource_math_onlinefilter_regen --gpus 8 --path /user/xuqixin/checkpoints/qwen25_vl-7b/grpo_v7_exp0_qwen25vl_grpo_opensource_math_onlinefilter_regen/global_step_300/actor/huggingface --dataset EMMA-mini & + +CUDA_VISIBLE_DEVICES=2 python do_eval_temp.py --run_name NEW_dr_grpo_step_800 --gpus 8 --path /user/xuqixin/checkpoints/qwen25_vl-7b/grpo_v7_exp9_qwen25vl_grpo_opensource_math_doc_dr_grpo/global_step_800/actor/huggingface --dataset EMMA-mini & + +CUDA_VISIBLE_DEVICES=3 python do_eval_temp.py --run_name NEW_dr_grpo_step_600 --gpus 8 --path /user/xuqixin/checkpoints/qwen25_vl-7b/grpo_v7_exp9_qwen25vl_grpo_opensource_math_doc_dr_grpo/global_step_600/actor/huggingface --dataset EMMA-mini & + +CUDA_VISIBLE_DEVICES=4 python do_eval_temp.py --run_name NEW_bbox_step_300 --gpus 8 --path /user/xuqixin/checkpoints/qwen25_vl-7b/grpo_v7_exp10_qwen25_vl_sft_bbox_grpo_opensource_doc/global_step_300/actor/huggingface --dataset EMMA-mini & + +CUDA_VISIBLE_DEVICES=5 python do_eval_temp.py --run_name NEW_clip_high_step_500 --gpus 8 --path /user/xuqixin/checkpoints/qwen25_vl-7b/grpo_v6_exp7_qwen25vl_grpo_opensource_doc_clip_high_028/global_step_500/actor/huggingface --dataset EMMA-mini & + +CUDA_VISIBLE_DEVICES=6 python do_eval_temp.py --run_name NEW_clip_high_step_600 --gpus 8 --path /user/xuqixin/checkpoints/qwen25_vl-7b/grpo_v6_exp7_qwen25vl_grpo_opensource_doc_clip_high_028/global_step_600/actor/huggingface --dataset EMMA-mini & \ No newline at end of file diff --git a/do_eval_emma.py b/do_eval_emma.py new file mode 100644 index 0000000000000000000000000000000000000000..20b6b189eda048245d385561e70067fe2706c5c7 --- /dev/null +++ b/do_eval_emma.py @@ -0,0 +1,222 @@ +import argparse +import json +import os +from datetime import datetime +import subprocess +import logging + +full_datasets = { + "MathVista_MINI": ["train_prompt_sampling"], + "MathVision": ["train_prompt_greedy"], + "MathVerse_MINI": ["train_prompt_greedy"], + "MMMU_DEV_VAL": ["origin_prompt_greedy"], + "MMStar": ["train_prompt_greedy"], + "DynaMath": ["train_prompt_greedy"], + "WeMath": ["train_prompt_greedy"], + "TextVQA_VAL": ["origin_prompt_greedy"], + "MMVet": ["origin_prompt_greedy"], + "MMDocBench": ["origin_prompt_greedy"], + "AI2D_TEST": ["origin_prompt_greedy"], + "HallusionBench": ["origin_prompt_greedy"], + "MMBench_DEV_EN_V11": ["origin_prompt_greedy"], + "OCRBench": ["origin_prompt_greedy"], + "DocVQA_VAL": ["origin_prompt_greedy"], + # "EMMA-mini": ["train_prompt_sampling"], + "EMMA": ["train_prompt_sampling"], + # "DocVQA_TEST": ["origin_prompt_greedy"], + # "MMBench_TEST_EN_V11": ["origin_prompt_greedy"], +} + +settings = { + "train_prompt_sampling": { + "use_reasoning_prompt": 2, + "do_sample": True, + "top_p": 1, + "top_k": -1, + "temperature": 1, + }, + "train_prompt_greedy": { + "use_reasoning_prompt": 2, + "do_sample": True, + "top_p": 
0.001, + "top_k": 1, + "temperature": 0.01, + }, + "origin_prompt_greedy": { + "use_reasoning_prompt": 0, + "do_sample": True, + "top_p": 0.001, + "top_k": 1, + "temperature": 0.01, + }, +} + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument("--run_name", type=str, required=True, help="Name of the run") + parser.add_argument("--gpus", type=int, default=8, help="Number of GPUs to use") + parser.add_argument("--path", type=str, required=True, help="Path to the model") + parser.add_argument( + "--dataset", type=str, nargs="+", required=True, help="List of datasets to use" + ) + + parser.add_argument( + "--min_pixels", type=int, default=3136, help="Minimum number of pixels" + ) + parser.add_argument( + "--max_pixels", type=int, default=12845056, help="Maximum number of pixels" + ) + parser.add_argument( + "--max_new_tokens", type=int, default=2048, help="Maximum number of new tokens" + ) + + args = parser.parse_args() + assert len(args.dataset), "--dataset should be a list of datasets" + + datasets = args.dataset + if len(args.dataset) == 1 and args.dataset[0] == "full": + datasets = list(full_datasets.keys()) + + for dataset in datasets: + assert ( + dataset in full_datasets + ), f"Dataset {dataset} is not in the list of available datasets: {list(full_datasets.keys())}" + + print("Datasets to be used:", datasets) + print("Run name:", args.run_name) + print("Number of GPUs:", args.gpus) + print("Model path:", args.path) + print("Minimum pixels:", args.min_pixels) + print("Maximum pixels:", args.max_pixels) + print("Maximum new tokens:", args.max_new_tokens, flush=True) + + for dataset in datasets: + assert isinstance(full_datasets[dataset], list) + for setting in full_datasets[dataset]: + config = { + "model": { + args.run_name: { + "class": "Qwen2VLChat", + "model_path": args.path, + "min_pixels": args.min_pixels, + "max_pixels": args.max_pixels, + "use_vllm": True, + "max_new_tokens": args.max_new_tokens, + **settings[setting], + }, + }, + "datasets": datasets, + } + + current_datetime = datetime.now().strftime("%Y%m%d") + save_dir = f"public_eval/{args.run_name}/{dataset}_{setting}/{current_datetime}" + os.makedirs(save_dir, exist_ok=True) + + config_name = f"config.json" + config_path = os.path.join(save_dir, config_name) + with open(config_path, "w") as json_file: + json.dump(config, json_file, indent=4) + + print(f"Start evaluating on {dataset}.") + print(f"Eval config {setting}", flush=True) + + env_vars = os.environ.copy() + env_vars["VLLM_USE_V1"] = "0" + + if dataset == "EMMA" or dataset == "EMMA-mini": + command = [ + "torchrun", + f"--nproc_per_node={args.gpus}", + "EMMA/generate_response.py", + "--dataset_name", + f"/root/LMUData/{dataset}", + "--model_path", + f"{args.path}", + "--output_path", + f"{save_dir}/results.json", + "--config_path", + "/user/konglingyu/VLMEvalKit/EMMA/configs/gpt.yaml", + "--strategy", + "CoT" + ] + + stdout_file = os.path.join(save_dir, f"out.log") + stderr_file = os.path.join(save_dir, f"err.log") + + with open(stdout_file, "w") as stdout, open(stderr_file, "w") as stderr: + try: + print(f"Output redirected to {stdout_file}") + print(f"Errors redirected to {stderr_file}", flush=True) + + process = subprocess.Popen( + command, env=env_vars, stdout=stdout, stderr=subprocess.PIPE, text=True + ) + + for line in process.stderr: + print(line, end="") # 输出到屏幕 + stderr.write(line) # 写入文件 + + # 等待命令完成 + process.wait() + + if process.returncode != 0: + print(f"Command failed with return code {process.returncode}. 
Check {stderr_file} for error details.", flush=True) + continue + + data = {} + for i in range(args.gpus): + assert os.path.exists(f"{save_dir}/results_{i}.json") + data.update(json.load(open(f"{save_dir}/results_{i}.json", "r"))) + with open(f"{save_dir}/results.json", "w") as f: + json.dump(data, f, indent=4) + from EMMA.evaluation.evaluate import gen_true_false + from EMMA.evaluation.calculate_acc import gen_score + gen_true_false(f"{save_dir}/results.json") + gen_score(f"{save_dir}/results.json", f"{save_dir}/results_acc.json") + except Exception as e: + print(f"torchrun failed. Check {stderr_file} for error details.", flush=True) + else: + command = [ + "torchrun", + f"--nproc_per_node={args.gpus}", + "run_for_bash.py", + "--config", + f"{config_path}", + "--data", + f"{dataset}", + "--verbose", + "--work-dir", + f"{save_dir}", + ] + + stdout_file = os.path.join(save_dir, f"out.log") + stderr_file = os.path.join(save_dir, f"err.log") + + with open(stdout_file, "w") as stdout, open(stderr_file, "w") as stderr: + try: + print(f"Output redirected to {stdout_file}") + print(f"Errors redirected to {stderr_file}", flush=True) + + process = subprocess.Popen( + command, env=env_vars, stdout=stdout, stderr=subprocess.PIPE, text=True + ) + + for line in process.stderr: + print(line, end="") # 输出到屏幕 + stderr.write(line) # 写入文件 + + # 等待命令完成 + process.wait() + + if process.returncode != 0: + print(f"Command failed with return code {process.returncode}. Check {stderr_file} for error details.", flush=True) + except subprocess.CalledProcessError as e: + print(f"torchrun failed. Check {stderr_file} for error details.", flush=True) + + +if __name__ == "__main__": + if not os.path.exists("/root/LMUData"): + os.symlink("/user/konglingyu/LMUData", "/root/LMUData") + main() diff --git a/do_eval_temp.py b/do_eval_temp.py new file mode 100644 index 0000000000000000000000000000000000000000..4e270d594869984ef51e3f881311ebcf44ade404 --- /dev/null +++ b/do_eval_temp.py @@ -0,0 +1,196 @@ +import argparse +import json +import os +from datetime import datetime +import subprocess +import logging + +full_datasets = { + "MathVista_MINI": ["train_prompt_sampling"], + "MathVision": ["train_prompt_greedy"], + "MathVerse_MINI": ["train_prompt_greedy"], + "MMMU_DEV_VAL": ["origin_prompt_greedy"], + "MMStar": ["train_prompt_greedy"], + "DynaMath": ["train_prompt_greedy"], + "WeMath": ["train_prompt_greedy"], + "TextVQA_VAL": ["origin_prompt_greedy"], + "MMVet": ["origin_prompt_greedy"], + "MMDocBench": ["origin_prompt_greedy"], + "AI2D_TEST": ["origin_prompt_greedy"], + "HallusionBench": ["origin_prompt_greedy"], + "MMBench_DEV_EN_V11": ["origin_prompt_greedy"], + "OCRBench": ["origin_prompt_greedy"], + "DocVQA_VAL": ["origin_prompt_greedy"], + "EMMA-mini": ["train_prompt_sampling"], + # "DocVQA_TEST": ["origin_prompt_greedy"], + # "MMBench_TEST_EN_V11": ["origin_prompt_greedy"], +} + +settings = { + "train_prompt_sampling": { + "use_reasoning_prompt": 2, + "do_sample": True, + "top_p": 1, + "top_k": -1, + "temperature": 1, + }, + "train_prompt_greedy": { + "use_reasoning_prompt": 2, + "do_sample": True, + "top_p": 0.001, + "top_k": 1, + "temperature": 0.01, + }, + "origin_prompt_greedy": { + "use_reasoning_prompt": 0, + "do_sample": True, + "top_p": 0.001, + "top_k": 1, + "temperature": 0.01, + }, +} + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument("--run_name", type=str, required=True, help="Name of the run") + parser.add_argument("--gpus", type=int, default=8, help="Number of GPUs to use") + 
parser.add_argument("--path", type=str, required=True, help="Path to the model") + parser.add_argument( + "--dataset", type=str, nargs="+", required=True, help="List of datasets to use" + ) + + parser.add_argument( + "--min_pixels", type=int, default=3136, help="Minimum number of pixels" + ) + parser.add_argument( + "--max_pixels", type=int, default=12845056, help="Maximum number of pixels" + ) + parser.add_argument( + "--max_new_tokens", type=int, default=2048, help="Maximum number of new tokens" + ) + + args = parser.parse_args() + assert len(args.dataset), "--dataset should be a list of datasets" + + datasets = args.dataset + if len(args.dataset) == 1 and args.dataset[0] == "full": + datasets = list(full_datasets.keys()) + + for dataset in datasets: + assert ( + dataset in full_datasets + ), f"Dataset {dataset} is not in the list of available datasets: {list(full_datasets.keys())}" + + print("Datasets to be used:", datasets) + print("Run name:", args.run_name) + print("Number of GPUs:", args.gpus) + print("Model path:", args.path) + print("Minimum pixels:", args.min_pixels) + print("Maximum pixels:", args.max_pixels) + print("Maximum new tokens:", args.max_new_tokens, flush=True) + + for dataset in datasets: + assert isinstance(full_datasets[dataset], list) + for setting in full_datasets[dataset]: + config = { + "model": { + args.run_name: { + "class": "Qwen2VLChat", + "model_path": args.path, + "min_pixels": args.min_pixels, + "max_pixels": args.max_pixels, + "use_vllm": True, + "max_new_tokens": args.max_new_tokens, + **settings[setting], + }, + }, + "datasets": datasets, + } + + current_datetime = datetime.now().strftime("%Y%m%d") + save_dir = f"public_eval/{args.run_name}/{dataset}_{setting}/{current_datetime}" + os.makedirs(save_dir, exist_ok=True) + + config_name = f"config.json" + config_path = os.path.join(save_dir, config_name) + with open(config_path, "w") as json_file: + json.dump(config, json_file, indent=4) + + print(f"Start evaluating on {dataset}.") + print(f"Eval config {setting}", flush=True) + + env_vars = os.environ.copy() + env_vars["VLLM_USE_V1"] = "0" + + if dataset == "EMMA" or dataset == "EMMA-mini": + logger = logging.getLogger('EMMA-logger') + logger.setLevel(level=logging.DEBUG) + + formatter = logging.Formatter('%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s') + + file_handler = logging.FileHandler(os.path.join(save_dir, f"out.log")) + file_handler.setLevel(level=logging.DEBUG) + file_handler.setFormatter(formatter) + + stream_handler = logging.StreamHandler() + stream_handler.setLevel(logging.DEBUG) + stream_handler.setFormatter(formatter) + + logger.addHandler(file_handler) + logger.addHandler(stream_handler) + + from EMMA.generate_response import do_generate + from EMMA.evaluation.evaluate import gen_true_false + from EMMA.evaluation.calculate_acc import gen_score + + dataset_name = f"/root/LMUData/{dataset}" + os.environ["VLLM_USE_V1"] = "0" + os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn" + do_generate(dataset_name, args.path, f"{save_dir}/results.json", logger=logger, seed=114413) + gen_true_false(f"{save_dir}/results.json", logger=logger) + gen_score(f"{save_dir}/results.json", f"{save_dir}/results_acc.json", logger=logger) + else: + command = [ + "torchrun", + f"--nproc_per_node={args.gpus}", + "run_for_bash.py", + "--config", + f"{config_path}", + "--data", + f"{dataset}", + "--verbose", + "--work-dir", + f"{save_dir}", + ] + + stdout_file = os.path.join(save_dir, f"out.log") + stderr_file = os.path.join(save_dir, 
f"err.log") + + with open(stdout_file, "w") as stdout, open(stderr_file, "w") as stderr: + try: + print(f"Output redirected to {stdout_file}") + print(f"Errors redirected to {stderr_file}", flush=True) + + process = subprocess.Popen( + command, env=env_vars, stdout=stdout, stderr=subprocess.PIPE, text=True + ) + + for line in process.stderr: + print(line, end="") # 输出到屏幕 + stderr.write(line) # 写入文件 + + # 等待命令完成 + process.wait() + + if process.returncode != 0: + print(f"Command failed with return code {process.returncode}. Check {stderr_file} for error details.", flush=True) + except subprocess.CalledProcessError as e: + print(f"torchrun failed. Check {stderr_file} for error details.", flush=True) + + +if __name__ == "__main__": + if not os.path.exists("/root/LMUData"): + os.symlink("/user/konglingyu/LMUData", "/root/LMUData") + main() diff --git a/docs/en/.readthedocs.yaml b/docs/en/.readthedocs.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c6cf8e2a075ea15f39dc7aba8faa98f464f52fe6 --- /dev/null +++ b/docs/en/.readthedocs.yaml @@ -0,0 +1,17 @@ +version: 2 + +# Set the version of Python and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.8" + +formats: + - epub + +sphinx: + configuration: docs/en/conf.py + +python: + install: + - requirements: requirements/docs.txt diff --git a/docs/en/ConfigSystem.md b/docs/en/ConfigSystem.md new file mode 100644 index 0000000000000000000000000000000000000000..1771d487274285d3cb6719fa9cb41d0d1030afe2 --- /dev/null +++ b/docs/en/ConfigSystem.md @@ -0,0 +1,67 @@ +# Config System + +By default, VLMEvalKit launches the evaluation by setting the model name(s) (defined in `/vlmeval/config.py`) and dataset name(s) (defined in `vlmeval/dataset/__init__.py` or `vlmeval/dataset/video_dataset_config.py`) in the `run.py` script with the `--model` and `--data` arguments. Such approach is simple and efficient in most scenarios, however, it may not be flexible enough when the user wants to evaluate multiple models / datasets with different settings. + +To address this, VLMEvalKit provides a more flexible config system. The user can specify the model and dataset settings in a json file, and pass the path to the config file to the `run.py` script with the `--config` argument. Here is a sample config json: + +```json +{ + "model": { + "GPT4o_20240806_T00_HIGH": { + "class": "GPT4V", + "model": "gpt-4o-2024-08-06", + "temperature": 0, + "img_detail": "high" + }, + "GPT4o_20240806_T10_Low": { + "class": "GPT4V", + "model": "gpt-4o-2024-08-06", + "temperature": 1.0, + "img_detail": "low" + }, + "GPT4o_20241120": {} + }, + "data": { + "MME-RealWorld-Lite": { + "class": "MMERealWorld", + "dataset": "MME-RealWorld-Lite" + }, + "MMBench_DEV_EN_V11": { + "class": "ImageMCQDataset", + "dataset": "MMBench_DEV_EN_V11" + }, + "MMBench_Video_8frame_nopack":{}, + "Video-MME_16frame_subs": { + "class": "VideoMME", + "dataset": "Video-MME", + "nframe": 16, + "use_subtitle": true + }, + } +} +``` + +Explanation of the config json: + +1. Now we support two fields: `model` and `data`, each of which is a dictionary. The key of the dictionary is the name of the model / dataset (set by the user), and the value is the setting of the model / dataset. +2. For items in `model`, the value is a dictionary containing the following keys: + - `class`: The class name of the model, which should be a class name defined in `vlmeval/vlm/__init__.py` (open-source models) or `vlmeval/api/__init__.py` (API models). 
+ - Other kwargs: Other kwargs are model-specific parameters, please refer to the definition of the model class for detailed usage. For example, `model`, `temperature`, `img_detail` are arguments of the `GPT4V` class. It's noteworthy that the `model` argument is required by most model classes. + - Tip: The defined model in the `supported_VLM` of `vlmeval/config.py` can be used as a shortcut, for example, `GPT4o_20241120: {}` is equivalent to `GPT4o_20241120: {'class': 'GPT4V', 'model': 'gpt-4o-2024-11-20', 'temperature': 0, 'img_size': -1, 'img_detail': 'high', 'retry': 10, 'verbose': False}` +3. For the dictionary `data`, we suggest users to use the official dataset name as the key (or part of the key), since we frequently determine the post-processing / judging settings based on the dataset name. For items in `data`, the value is a dictionary containing the following keys: + - `class`: The class name of the dataset, which should be a class name defined in `vlmeval/dataset/__init__.py`. + - Other kwargs: Other kwargs are dataset-specific parameters, please refer to the definition of the dataset class for detailed usage. Typically, the `dataset` argument is required by most dataset classes. It's noteworthy that the `nframe` argument or `fps` argument is required by most video dataset classes. + - Tip: The defined dataset in the `supported_video_datasets` of `vlmeval/dataset/video_dataset_config.py` can be used as a shortcut, for example, `MMBench_Video_8frame_nopack: {}` is equivalent to `MMBench_Video_8frame_nopack: {'class': 'MMBenchVideo', 'dataset': 'MMBench-Video', 'nframe': 8, 'pack': False}`. +Saving the example config json to `config.json`, you can launch the evaluation by: + +```bash +python run.py --config config.json +``` + +That will generate the following output files under the working directory `$WORK_DIR` (Following the format `{$WORK_DIR}/{$MODEL_NAME}/{$MODEL_NAME}_{$DATASET_NAME}_*`): + +- `$WORK_DIR/GPT4o_20240806_T00_HIGH/GPT4o_20240806_T00_HIGH_MME-RealWorld-Lite*` +- `$WORK_DIR/GPT4o_20240806_T10_Low/GPT4o_20240806_T10_Low_MME-RealWorld-Lite*` +- `$WORK_DIR/GPT4o_20240806_T00_HIGH/GPT4o_20240806_T00_HIGH_MMBench_DEV_EN_V11*` +- `$WORK_DIR/GPT4o_20240806_T10_Low/GPT4o_20240806_T10_Low_MMBench_DEV_EN_V11*` +... diff --git a/docs/en/Contributors.md b/docs/en/Contributors.md new file mode 100644 index 0000000000000000000000000000000000000000..ddf50c6c4eb7caf352fe29069e65a93a2d4cac49 --- /dev/null +++ b/docs/en/Contributors.md @@ -0,0 +1,21 @@ +# Contributors + +## Contributors w. 3+ Major Contributions + +> In this section, we list all the contributors who have made significant contributions (3+) to the development of VLMEvalKit. + +New Qualified Contributors (2024.09): + +1. [amitbcp](https://github.com/amitbcp): The contributor helped support MUIRBench, Phi-3.5, Idefics3, VILA, and xGen-MM +2. [czczup](https://github.com/czczup): The contributor helped support the InternVL Series (V1.5, Mini-InternVL, V2, etc.) +3. [DseidLi](https://github.com/DseidLi): The contributor helped support LLaVA-OneVision, GQA, and developed the readthedocs site for VLMEvalKit +4. [mayubo2333](https://github.com/mayubo2333): The contributor helped support MMLongBench, SlideVQA, and DUDE +5. [sun-hailong](https://github.com/sun-hailong): The contributor helped support A-OKVQA, Parrot, MMMB, and MTL-MMBench +6. [PhoenixZ810](https://github.com/PhoenixZ810): The contributor helped support Video-ChatGPT, Chat-UniVI, and Llama-VID +7. 
[Cuiunbo](https://github.com/Cuiunbo): The contributor helped support OmniLMM-12B, MiniCPM-V Series (V1, V2, V2.5) + +## Full Contributor List + +> In this section, we list all the contributors as well as their corresponding contributions to the development of VLMEvalKit. + +TBD. diff --git a/docs/en/Development.md b/docs/en/Development.md new file mode 100644 index 0000000000000000000000000000000000000000..0fe5a60e22252a2098ed0edd47e6f219d51a0a4d --- /dev/null +++ b/docs/en/Development.md @@ -0,0 +1,145 @@ +# Develop new Benchmark / MLLM + +> 🛠️ How to implement a new Benchmark / VLM in VLMEvalKit? + +## Implement a new benchmark + +Example PR: **Math-Vision Benchmark** ([#292](https://github.com/open-compass/VLMEvalKit/pull/292/files)) + +In VLMEvalKit, benchmarks are organized as dataset classes. When you try to implement a new benchmark, you can either reuse existing dataset classes (*e.g.*, You can reuse `ImageMCQDataset` when implementing a new multi-choice benchmark), or support a new dataset class. Each dataset must have the following two member functions (either reuse the one of the parent class or implement your own): + +- `build_prompt(self, line)`: The function input `line` is an integer (the sample index) or a `pd.Series` object (the raw record of the sample). The function outputs a `multi-modal message`, serving as the input of an MLLM. The `multi-modal message` is an interleaved list of multi-modal messages adopting the following format (the example includes an image and a text message): `[dict(type='image', value=IMAGE_PTH), dict(type='text', value=prompt)]`. +- `evaluate(self, eval_file, **judge_kwargs)`: The function input `eval_file` is the MLLM prediction (typically in `.xlsx` format). If the benchmark requires an external LLM (typically GPT) for evaluation, then `judge_kwargs` can pass the arguments for the LLM. The function outputs the benchmark evaluation results (metrics) in the form of `dict` or `pd.DataFrame`. + +We then brief the typical steps to implement a new benchmark under VLMEvalKit: + +### 1. Prepare your benchmark tsv file + +Currently, we organize a benchmark as one single TSV file. During inference, the data file will be automatically downloaded from the definited `DATASET_URL` link to `$LMUData` file (default path is `$HOME/LMUData`, if not set explicitly). You can upload the prepared TSV file to a downloadable address (e.g., Huggingface) or send it to us at . We will assist in uploading the dataset to the server. You can also customize `LMUData` path in the environment variable `LMUData=/path/to/your/data`. + +The contents of the TSV file consist of: + +| Dataset Name \ Fields | index | image | image_path | question | hint | multi-choice
options | answer | category | l2-category | split | +| --------------------------------------- | ----- | ----- | ---------- | -------- | ---- | ----------------------- | ------ | -------- | ----------- | ----- | +| MMBench_DEV_[CN/EN] | ✅ | ✅ | | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | +| MMBench_TEST_[CN/EN] | ✅ | ✅ | | ✅ | ✅ | ✅ | | ✅ | ✅ | ✅ | +| CCBench | ✅ | ✅ | | ✅ | | ✅ | ✅ | ✅ | | | +| SEEDBench_IMG | ✅ | ✅ | | ✅ | | ✅ | ✅ | ✅ | | | +| MME | ✅ | ✅ | | ✅ | | | ✅ | ✅ | | | +| MMVet | ✅ | ✅ | | ✅ | | | ✅ | ✅ | | | +| MMMU_DEV_VAL | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | ✅ | ✅ | ✅ | +| COCO_VAL | ✅ | ✅ | | | | | ✅ | | | | +| OCRVQA_[TEST/TESTCORE] | ✅ | ✅ | | ✅ | | | ✅ | | | | +| TextVQA_VAL | ✅ | ✅ | | ✅ | | | ✅ | | | | +| VCR_[EN/ZH]\_[EASY/HARD]\_[ALL/500/100] | ✅ | ✅ | | ✅ | | | ✅ | | | | +| MMMB_[en/cn/pt/ar/tr/ru] | ✅ | ✅ | | ✅ | ✅ | ✅ | ✅ | ✅ | |✅ | +| MMBench_dev_[en/cn/pt/ar/tr/ru] | ✅ | ✅ | | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |✅ | + +
Table 1. TSV fields of supported datasets.
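+To make the expected layout concrete, here is a small, hypothetical example that assembles a two-sample benchmark TSV containing only the mandatory fields (the image path, questions, and output file name are placeholders; the base64 helper lives in `vlmeval/smp/vlm.py`, as described below):
+
+```python
+# Hypothetical example: build a tiny benchmark TSV with the mandatory fields only.
+# The image path, questions, and output file name are placeholders.
+import pandas as pd
+from vlmeval.smp.vlm import encode_image_file_to_base64
+
+records = [
+    dict(index=0, image=encode_image_file_to_base64('assets/apple.jpg'),
+         question='What fruit is shown in the image?', answer='apple'),
+    dict(index=1, image=encode_image_file_to_base64('assets/apple.jpg'),
+         question='Is the fruit red? Answer yes or no.', answer='yes'),
+]
+pd.DataFrame(records).to_csv('MyBenchmark.tsv', sep='\t', index=False)
+```
+
+The resulting file can then be hosted at a downloadable address and referenced through `DATASET_URL`, as described above.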
+ +**Intro to mandatory fields in the `TSV` file:** + +- **index:** Integer, Unique for each line in `tsv` +- **image:** The base64 of the image, you can use APIs implemented in `vlmeval/smp/vlm.py` for encoding and decoding: + - Encoding: `encode_image_to_base64 `(for PIL Image) / `encode_image_file_to_base64` (for image file path) + - Decoding: `decode_base64_to_image`(for PIL Image) / `decode_base64_to_image_file` (for image file path) +- **question**: The question corresponding to the image, a string +- **answer**: The answer to the question, a string. The `test` split does not need this field + +### 2. Cutomize your benchmark prompt + +`ImageBaseDataset` defines the default prompt format. If you need to add prompts specific to the dataset or input data in the `Interleave` format to the model, you can implement this through the `build_prompt(line)` function. This function takes a line from a TSV file as input, containing fields such as index, image, question, etc. The function returns a dictionary list of multimodal messages `msg` in the format `[dict(type='image', value=IMAGE_PTH), dict(type='text', value=prompt)]`, including the image path and the text prompt to be input into VLMs. For interleave type inputs, you can directly place the dictionary of the image path at the image token position. + +### 3. Cutomize your benchmark metrics + +To add evaluation for a new benchmark, you need to customize a class object to implement the dataset’s metrics calculation. Multimodal datasets inherit from the `ImageBaseDataset` object in `vlmeval/dataset/image_base.py`. The TYPE defines the type of dataset, `DATASET_URL` is the download address of the dataset, and `DATASET_MD5` is the MD5 checksum for consistency checking of the dataset file. + +In this class, **you need to implement** the `evaluate(eval_file, **judge_kwargs)` class function to calculate metrics and output results for the custom dataset. The function input `eval_file` is the path to the model prediction results file `{model_name}_{dataset}.xlsx`. This file can be read as a pandas.DataFrame using the `load(eval_file)` method, containing fields such as index, question, answer, category, prediction, etc. The judge_kwargs will pass a dictionary related to evaluation, such as the name of the `judge model`, the number of API request threads, etc. **The return value** of the function is the calculated accuracy and other metrics, formatted as a dictionary composed of lists, organized into a pandas.DataFrame. + +## Implement a new model + +Example PR: **Support LLaVA-Next-Interleave** ([#294](https://github.com/open-compass/VLMEvalKit/pull/294)) + +**1. Support `generate_inner` API (mandatory).** + +All existing models are implemented in `vlmeval/vlm`. For a minimal model, your model class **must implement the method** `generate_inner(msgs, dataset=None)`. In this function, you feed a multi-modal message to your VLM and return the VLM prediction (which is a string). The optional argument `dataset` can be used as the flag for the model to switch among various inference strategies. + +The multi-modal messages `msgs` is a list of dictionaries, each dictionary has two keys: type and value: +- `type`: We currently support two types, choices are ["image", "text"]. +- `value`: When type=='text' , the value is the text message (a single string); when type=='image', the value can be the local path of an image file, or the image URL. + +Currently a multi-modal message may contain arbitrarily interleaved images and texts. 
If your model do not support that, a practice can be taking the 1st image and concatenated text messages as the input. You can set the `INTERLEAVE = False` in your model class and use `self.message_to_promptimg(message, dataset=dataset)` to build your prompt and the first image's path. + +Here are some examples of multi-modal messages: + +```python +IMAGE_PTH = 'assets/apple.jpg' +IMAGE_URL = 'https://raw.githubusercontent.com/open-compass/VLMEvalKit/main/assets/apple.jpg' +msg1 = [ + dict(type='image', value=IMAGE_PTH), + dict(type='text', value='What is in this image?') +] +msg2 = [ + dict(type='image', value=IMAGE_URL), + dict(type='image', value=IMAGE_URL), + dict(type='text', value='How many apples are there in these images?') +] +response = model.generate(msg1) +``` + +For convenience sake, we also support to take a list of string as inputs. In that case, we will check if a string is an image path or image URL and automatically convert it to the list[dict] format: + +```python +IMAGE_PTH = 'assets/apple.jpg' +IMAGE_URL = 'https://raw.githubusercontent.com/open-compass/VLMEvalKit/main/assets/apple.jpg' +msg1 = [IMAGE_PTH, 'What is in this image?'] +msg2 = [IMAGE_URL, IMAGE_URL, 'How many apples are there in these images?'] +response = model.generate(msg1) +``` + +**Support Custom Prompt (optional).** + +Besides, your model can support **custom prompt building** by implementing two optional methods: `use_custom_prompt(dataset)` and `build_prompt(line, dataset=None)`. + +Both functions take the dataset name as the input: + +- `use_custom_prompt(dataset)` returns a boolean flag, indicating whether the model should use the custom prompt building strategy. +- If `use_custom_prompt(dataset)` returns True, `build_prompt(line, dataset)` should return a customly bulit multimodal message for the corresponding `dataset`, given `line`, which is a dictionary that includes the necessary information of a data sample. If `use_custom_prompt(dataset)` returns False, the default prompt building strategy will be used. + +**Support multi-turn chatting (optional).** + +You can also support the multi-turn chatting and evaluation with your VLM by supporting the `chat_inner(message, dataset)` function. The function outputs a single string response, and the `message` is a list of chat history, following the below format. + +```python +# Assume msg1, msg2, msg3, ... are multi-modal messages following the previously described format +# `chat_inner` take the following chat history list as input: +message = [ + dict(role='user', content=msg1), + dict(role='assistant', content=msg2), + dict(role='user', content=msg3), + dict(role='assistant', content=msg4), + ...... + dict(role='user', content=msgn), +] +# `message` should contain an odd number of chat utterances, the role of utterances should be interleaved "user" and "assistant", with the role of the last utterance to be "user". 
+# The chat function will call `chat_inner` +response = model.chat(message) +``` + +### Example PRs: + +- VLM that doesn't support interleaved images and texts, and does not use custom prompts: [[Model] Support glm-4v-9b](https://github.com/open-compass/VLMEvalKit/pull/221) +- VLM that supports interleaved images and texts and custom prompts: [Add MiniCPM-Llama3-V-2.5](https://github.com/open-compass/VLMEvalKit/pull/205) +- VLM API: [Feature add glmv](https://github.com/open-compass/VLMEvalKit/pull/201) + +## Contribute to VLMEvalKit + +If you want to contribute codes to **VLMEvalKit**, please do the pre-commit check before you submit a PR. That helps to keep the code tidy. + +```bash +# Under the directory of VLMEvalKit, install the pre-commit hook: +pip install pre-commit +pre-commit install +pre-commit run --all-files +# Then you can commit your code. +``` diff --git a/docs/en/EvalByLMDeploy.md b/docs/en/EvalByLMDeploy.md new file mode 100644 index 0000000000000000000000000000000000000000..fc0a8c38c26542eb44acdc74b28aaca9755735ba --- /dev/null +++ b/docs/en/EvalByLMDeploy.md @@ -0,0 +1,27 @@ +# Using LMDeploy to Accelerate Evaluation and Inference + +VLMEvalKit supports testing VLM models deployed by LMDeploy. Below, we use InternVL2-8B as an example to show how to test the model. + +## Step 0: Install LMDeploy + +```bash +pip install lmdeploy +``` +For other installation methods, you can refer to LMDeploy's [documentation](https://github.com/InternLM/lmdeploy). + +## Step 1: Start the Inference Service + +```bash +lmdeploy serve api_server OpenGVLab/InternVL2-8B --model-name InternVL2-8B +``` +> [!IMPORTANT] +> Since models in VLMEvalKit may have custom behaviors when building prompts for different datasets, such as InternVL2's handling of HallusionBench, it is necessary to specify `--model-name` when starting the server. This allows the VLMEvalKit to select appropriate prompt construction strategy based on the name when using the LMDeploy API. +> +> If `--server-port`, is specified, the corresponding environment variable `LMDEPLOY_API_BASE` needs to be set. + + +## Step 2: Evaluation + +```bash +python run.py --data MMStar --model lmdeploy --verbose --api-nproc 64 +``` diff --git a/docs/en/Makefile b/docs/en/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..d4bb2cbb9eddb1bb1b4f366623044af8e4830919 --- /dev/null +++ b/docs/en/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/en/Quickstart.md b/docs/en/Quickstart.md new file mode 100644 index 0000000000000000000000000000000000000000..9db5152b68c96882c7cf62b826626dbb73c2e716 --- /dev/null +++ b/docs/en/Quickstart.md @@ -0,0 +1,212 @@ +# Quickstart + +Before running the evaluation script, you need to **configure** the VLMs and set the model_paths properly. + +After that, you can use a single script `run.py` to inference and evaluate multiple VLMs and benchmarks at a same time. + +## Step 0. 
Installation & Setup essential keys + +**Installation.** + +```bash +git clone https://github.com/open-compass/VLMEvalKit.git +cd VLMEvalKit +pip install -e . +``` + +**Setup Keys.** + +To infer with API models (GPT-4v, Gemini-Pro-V, etc.) or use LLM APIs as the **judge or choice extractor**, you need to first setup API keys. VLMEvalKit will use an judge **LLM** to extract answer from the output if you set the key, otherwise it uses the **exact matching** mode (find "Yes", "No", "A", "B", "C"... in the output strings). **The exact matching can only be applied to the Yes-or-No tasks and the Multi-choice tasks.** +- You can place the required keys in `$VLMEvalKit/.env` or directly set them as the environment variable. If you choose to create a `.env` file, its content will look like: + + ```bash + # The .env file, place it under $VLMEvalKit + # API Keys of Proprietary VLMs + # QwenVL APIs + DASHSCOPE_API_KEY= + # Gemini w. Google Cloud Backends + GOOGLE_API_KEY= + # OpenAI API + OPENAI_API_KEY= + OPENAI_API_BASE= + # StepAI API + STEPAI_API_KEY= + # REKA API + REKA_API_KEY= + # GLMV API + GLMV_API_KEY= + # CongRong API + CW_API_BASE= + CW_API_KEY= + # SenseChat-V API + SENSECHAT_AK= + SENSECHAT_SK= + # Hunyuan-Vision API + HUNYUAN_SECRET_KEY= + HUNYUAN_SECRET_ID= + # LMDeploy API + LMDEPLOY_API_BASE= + # You can also set a proxy for calling api models during the evaluation stage + EVAL_PROXY= + ``` + +- Fill the blanks with your API keys (if necessary). Those API keys will be automatically loaded when doing the inference and evaluation. +## Step 1. Configuration + +**VLM Configuration**: All VLMs are configured in `vlmeval/config.py`. Few legacy VLMs (like MiniGPT-4, LLaVA-v1-7B) requires additional configuration (configuring the code / model_weight root in the config file). During evaluation, you should use the model name specified in `supported_VLM` in `vlmeval/config.py` to select the VLM. Make sure you can successfully infer with the VLM before starting the evaluation with the following command `vlmutil check {MODEL_NAME}`. + +## Step 2. Evaluation + +**New!!!** We integrated a new config system to enable more flexible evaluation settings. Check the [Document](/docs/en/ConfigSystem.md) or run `python run.py --help` for more details 🔥🔥🔥 + +We use `run.py` for evaluation. To use the script, you can use `$VLMEvalKit/run.py` or create a soft-link of the script (to use the script anywhere): + +**Arguments** + +- `--data (list[str])`: Set the dataset names that are supported in VLMEvalKit (names can be found in the codebase README). +- `--model (list[str])`: Set the VLM names that are supported in VLMEvalKit (defined in `supported_VLM` in `vlmeval/config.py`). +- `--mode (str, default to 'all', choices are ['all', 'infer'])`: When `mode` set to "all", will perform both inference and evaluation; when set to "infer", will only perform the inference. +- `--api-nproc (int, default to 4)`: The number of threads for OpenAI API calling. +- `--work-dir (str, default to '.')`: The directory to save evaluation results. + +**Command for Evaluating Image Benchmarks ** + +You can run the script with `python` or `torchrun`: + +```bash +# When running with `python`, only one VLM instance is instantiated, and it might use multiple GPUs (depending on its default behavior). +# That is recommended for evaluating very large VLMs (like IDEFICS-80B-Instruct). 
+
+# IDEFICS-80B-Instruct on MMBench_DEV_EN, MME, and SEEDBench_IMG, Inference and Evaluation
+python run.py --data MMBench_DEV_EN MME SEEDBench_IMG --model idefics_80b_instruct --verbose
+# IDEFICS-80B-Instruct on MMBench_DEV_EN, MME, and SEEDBench_IMG, Inference only
+python run.py --data MMBench_DEV_EN MME SEEDBench_IMG --model idefics_80b_instruct --verbose --mode infer
+
+# When running with `torchrun`, one VLM instance is instantiated on each GPU. This can speed up inference.
+# However, that is only suitable for VLMs that consume small amounts of GPU memory.
+
+# IDEFICS-9B-Instruct, Qwen-VL-Chat, mPLUG-Owl2 on MMBench_DEV_EN, MME, and SEEDBench_IMG. On a node with 8 GPUs. Inference and Evaluation.
+torchrun --nproc-per-node=8 run.py --data MMBench_DEV_EN MME SEEDBench_IMG --model idefics_80b_instruct qwen_chat mPLUG-Owl2 --verbose
+# Qwen-VL-Chat on MME. On a node with 2 GPUs. Inference and Evaluation.
+torchrun --nproc-per-node=2 run.py --data MME --model qwen_chat --verbose
+```
+
+**Command for Evaluating Video Benchmarks**
+
+```bash
+# When running with `python`, only one VLM instance is instantiated, and it might use multiple GPUs (depending on its default behavior).
+# That is recommended for evaluating very large VLMs (like IDEFICS-80B-Instruct).
+
+# IDEFICS2-8B on MMBench-Video, with 8 frames as inputs and vanilla evaluation. On a node with 8 GPUs. MMBench_Video_8frame_nopack is a dataset setting defined in `vlmeval/dataset/video_dataset_config.py`.
+torchrun --nproc-per-node=8 run.py --data MMBench_Video_8frame_nopack --model idefics2_8
+# GPT-4o (API model) on MMBench-Video, with 1 frame per second as inputs and pack evaluation (all questions of a video in a single query).
+python run.py --data MMBench_Video_1fps_pack --model GPT4o
+```
+
+The evaluation results will be printed as logs. Besides, **Result Files** will also be generated in the directory `$YOUR_WORKING_DIRECTORY/{model_name}`. Files ending with `.csv` contain the evaluated metrics.
+
+### Frequently Asked Questions
+
+#### Constructing the Input Prompt: The `build_prompt()` Function
+
+If you find that the model's output does not match the expected results when evaluating a specific benchmark, it could be due to the model not constructing the input prompt correctly.
+
+In VLMEvalKit, each `dataset` class includes a function named `build_prompt()`, which is responsible for formatting input questions. Different benchmarks can either customize their own `build_prompt()` function or use the default implementation.
+
+For instance, when handling the default [Multiple-Choice QA](https://github.com/open-compass/VLMEvalKit/blob/43af13e052de6805a8b08cd04aed5e0d74f82ff5/vlmeval/dataset/image_mcq.py#L164), the `ImageMCQDataset.build_prompt()` method combines elements such as `hint`, `question`, and `options` (if present in the dataset) into a complete question, formatted as shown below:
+
+```
+HINT
+QUESTION
+Options:
+A. Option A
+B. Option B
+···
+Please select the correct answer from the options above.
+```
+
+Additionally, since different models may have varying evaluation requirements, VLMEvalKit also supports customizing the prompt construction method at the model level through `model.build_prompt()`. For an example, you can refer to [InternVL](https://github.com/open-compass/VLMEvalKit/blob/43af13e052de6805a8b08cd04aed5e0d74f82ff5/vlmeval/vlm/internvl_chat.py#L324).
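+
+As a rough illustration, here is a minimal sketch of what a model-level `build_prompt()` may look like (this is not the actual InternVL code; the class name, the `image_path` field, and the option-formatting details are simplifying assumptions):
+
+```python
+import string
+
+import pandas as pd
+
+
+class MyVLM:  # hypothetical model class, for illustration only
+    def build_prompt(self, line, dataset=None):
+        # `line` is one record of the benchmark TSV (a dict-like row).
+        prompt = ''
+        if 'hint' in line and not pd.isna(line['hint']):
+            prompt += f"Hint: {line['hint']}\n"
+        prompt += f"Question: {line['question']}\n"
+        options = {
+            cand: line[cand]
+            for cand in string.ascii_uppercase
+            if cand in line and not pd.isna(line[cand])
+        }
+        if options:
+            prompt += 'Options:\n'
+            prompt += ''.join(f'{k}. {v}\n' for k, v in options.items())
+            prompt += 'Please select the correct answer from the options above.\n'
+        # Return an interleaved multi-modal message. We assume the image has already been
+        # dumped to a local file whose path is stored in `line['image_path']`; real model
+        # classes may obtain the image path differently.
+        return [
+            dict(type='image', value=line['image_path']),
+            dict(type='text', value=prompt),
+        ]
+```
+
+Such a method is only invoked for a benchmark when the model's `use_custom_prompt()` returns `True` for it, as explained below.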
+ +**Note: If both `model.build_prompt()` and `dataset.build_prompt()` are defined, `model.build_prompt()` will take precedence over `dataset.build_prompt()`, effectively overriding it.** + +Some models, such as Qwen2VL and InternVL, define extensive prompt-building methods for various types of benchmarks. To provide more flexibility in adapting to different benchmarks, VLMEvalKit allows users to customize the `model.use_custom_prompt()` function within the model. By adding or modifying the `use_custom_prompt()` function, you can decide which benchmarks should utilize the model's custom prompt logic. Below is an example: + +```python +def use_custom_prompt(self, dataset: str) -> bool: + from vlmeval.dataset import DATASET_TYPE, DATASET_MODALITY + dataset_type = DATASET_TYPE(dataset, default=None) + if not self._use_custom_prompt: + return False + if listinstr(['MMVet'], dataset): + return True + if dataset_type == 'MCQ': + return True + if DATASET_MODALITY(dataset) == 'VIDEO': + return False + return False +``` +Only when the `use_custom_prompt()` function returns `True` will VLMEvalKit call the model's `build_prompt()` function for the current benchmark. +With this approach, you can flexibly control which benchmarks use the model's custom prompt logic based on your specific needs, thereby better adapting to different models and tasks. + +#### Model Splitting + +For large models with substantial parameter counts, such as InternVL2-78B, a single GPU may not be able to accommodate the entire model during inference. In such cases, you can define the environment variable `AUTO_SPLIT=1`. For models that support the `split_model()` function, the model will automatically be split and distributed across multiple GPUs. + +For example, on a machine equipped with 8 GPUs, you can run the model using the following command: + +```bash +# For an 8-GPU machine +AUTO_SPLIT=1 torchrun --nproc-per-node=1 run.py --data MMBench_DEV_EN --model InternVL2-76B --verbose +``` +This command will automatically split the InternVL2-76B model into 8 parts and run each part on a separate GPU. +#### Performance Discrepancies + +Model performance may vary across different environments. As a result, you might observe discrepancies between your evaluation results and those listed on the official VLMEvalKit leaderboard. These differences could be attributed to variations in versions of libraries such as `transformers`, `cuda`, and `torch`. + +Besides, if you encounter unexpected performance, we recommend first reviewing the local generation records (`{model}_{dataset}.xlsx`) or the evaluation records (`{model}_{dataset}_{judge_model}.xlsx`). This may help you better understand the evaluation outcomes and identify potential issues. + +## Deploy a local language model as the judge / choice extractor +The default setting mentioned above uses OpenAI's GPT as the judge LLM. However, you can also deploy a local judge LLM with [LMDeploy](https://github.com/InternLM/lmdeploy). + +First install: +``` +pip install lmdeploy openai +``` + +And then deploy a local judge LLM with the single line of code. LMDeploy will automatically download the model from Huggingface. 
Assuming we use internlm2-chat-1_8b as the judge, port 23333, and the key sk-123456 (the key must start with "sk-" and follow with any number you like): +``` +lmdeploy serve api_server internlm/internlm2-chat-1_8b --server-port 23333 +``` + +You need to get the model name registered by LMDeploy with the following python code: +``` +from openai import OpenAI +client = OpenAI( + api_key='sk-123456', + base_url="http://0.0.0.0:23333/v1" +) +model_name = client.models.list().data[0].id +``` + +Now set some environment variables to tell VLMEvalKit how to use the local judge LLM. As mentioned above, you can also set them in `$VLMEvalKit/.env` file: +``` +OPENAI_API_KEY=sk-123456 +OPENAI_API_BASE=http://0.0.0.0:23333/v1/chat/completions +LOCAL_LLM= +``` + +Finally, you can run the commands in step 2 to evaluate your VLM with the local judge LLM. + +Note that + +- If you hope to deploy the judge LLM in a single GPU and evaluate your VLM on other GPUs because of limited GPU memory, try `CUDA_VISIBLE_DEVICES=x` like +``` +CUDA_VISIBLE_DEVICES=0 lmdeploy serve api_server internlm/internlm2-chat-1_8b --server-port 23333 +CUDA_VISIBLE_DEVICES=1,2,3 torchrun --nproc-per-node=3 run.py --data HallusionBench --model qwen_chat --verbose +``` +- If the local judge LLM is not good enough in following the instructions, the evaluation may fail. Please report such failures (e.g., by issues). +- It's possible to deploy the judge LLM in different ways, e.g., use a private LLM (not from HuggingFace) or use a quantized LLM. Please refer to the [LMDeploy doc](https://lmdeploy.readthedocs.io/en/latest/serving/api_server.html). You can use any other deployment framework if they support OpenAI API. + + +### Using LMDeploy to Accelerate Evaluation and Inference + +You can refer this [doc](/docs/en/EvalByLMDeploy.md) diff --git a/docs/en/_static/css/readthedocs.css b/docs/en/_static/css/readthedocs.css new file mode 100644 index 0000000000000000000000000000000000000000..c83beffd261d9d7cb79dc499aec7187474639d89 --- /dev/null +++ b/docs/en/_static/css/readthedocs.css @@ -0,0 +1,63 @@ +.header-logo { + background-image: url("../image/logo.svg"); + background-size: 275px 80px; + height: 80px; + width: 275px; +} + + +@media screen and (min-width: 1100px) { + .header-logo { + top: -25px; + } +} + +pre { + white-space: pre; +} + +@media screen and (min-width: 2000px) { + .pytorch-content-left { + width: 1200px; + margin-left: 30px; + } + article.pytorch-article { + max-width: 1200px; + } + .pytorch-breadcrumbs-wrapper { + width: 1200px; + } + .pytorch-right-menu.scrolling-fixed { + position: fixed; + top: 45px; + left: 1580px; + } +} + + +article.pytorch-article section code { + padding: .2em .4em; + background-color: #f3f4f7; + border-radius: 5px; +} + +/* Disable the change in tables */ +article.pytorch-article section table code { + padding: unset; + background-color: unset; + border-radius: unset; +} + +table.autosummary td { + width: 50% +} + +img.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +article.pytorch-article p.rubric { + font-weight: bold; +} diff --git a/docs/en/_static/image/logo.svg b/docs/en/_static/image/logo.svg new file mode 100644 index 0000000000000000000000000000000000000000..043530572afb48d0eac26b4b53d448aae6e9a9af --- /dev/null +++ b/docs/en/_static/image/logo.svg @@ -0,0 +1,24 @@ + + + +Created with Fabric.js 5.3.0 + + + + + + + + + + + + + VLMEvalKit + diff --git a/docs/en/_static/image/logo_icon.svg b/docs/en/_static/image/logo_icon.svg new file mode 100644 index 
0000000000000000000000000000000000000000..c46dd3b5407c1f82dce4f6096acf8c8a30a6cfba --- /dev/null +++ b/docs/en/_static/image/logo_icon.svg @@ -0,0 +1,31 @@ + + + + + + + + + + + + + + + + + + + diff --git a/docs/en/_static/js/custom.js b/docs/en/_static/js/custom.js new file mode 100644 index 0000000000000000000000000000000000000000..84da69d47fae8e8994685aca3b99151d01a77978 --- /dev/null +++ b/docs/en/_static/js/custom.js @@ -0,0 +1,10 @@ +var collapsedSections = []; + +$(document).ready(function () { + $('.model-summary').DataTable({ + "stateSave": false, + "lengthChange": false, + "pageLength": 20, + "order": [] + }); +}); diff --git a/docs/en/_templates/404.html b/docs/en/_templates/404.html new file mode 100644 index 0000000000000000000000000000000000000000..64910175d5d69946845b04d5e6a378de205e8388 --- /dev/null +++ b/docs/en/_templates/404.html @@ -0,0 +1,18 @@ +{% extends "layout.html" %} + +{% block body %} + +

+<h1>Page Not Found</h1>
+
+<p>
+  The page you are looking for cannot be found.
+</p>
+
+<p>
+  If you just switched documentation versions, it is likely that the page you were on is moved. You can look for it in
+  the content table left, or go to the homepage.
+</p>
+ + +{% endblock %} diff --git a/docs/en/_templates/autosummary/class.rst b/docs/en/_templates/autosummary/class.rst new file mode 100644 index 0000000000000000000000000000000000000000..4c3a7a9abf5c5b14ac3ef3b00a2f070480295358 --- /dev/null +++ b/docs/en/_templates/autosummary/class.rst @@ -0,0 +1,13 @@ +.. role:: hidden + :class: hidden-section +.. currentmodule:: {{ module }} + + +{{ name | underline}} + +.. autoclass:: {{ name }} + :members: + +.. + autogenerated from _templates/autosummary/class.rst + note it does not have :inherited-members: diff --git a/docs/en/_templates/callable.rst b/docs/en/_templates/callable.rst new file mode 100644 index 0000000000000000000000000000000000000000..3a7b9d2b96c76dfa3eb1d8bef56f58f219fe7760 --- /dev/null +++ b/docs/en/_templates/callable.rst @@ -0,0 +1,14 @@ +.. role:: hidden + :class: hidden-section +.. currentmodule:: {{ module }} + + +{{ name | underline}} + +.. autoclass:: {{ name }} + :members: + :special-members: __call__ + +.. + autogenerated from _templates/callable.rst + note it does not have :inherited-members: diff --git a/docs/en/conf.py b/docs/en/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..360c1622dd18fcca8c033af9122383cd66c5f686 --- /dev/null +++ b/docs/en/conf.py @@ -0,0 +1,234 @@ +# flake8: noqa +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import ast +import subprocess +import sys + +import pytorch_sphinx_theme +from sphinx.builders.html import StandaloneHTMLBuilder + +sys.path.insert(0, os.path.abspath('../../')) + +# -- Project information ----------------------------------------------------- + +project = 'VLMEvalKit' +copyright = '2023, VLMEvalKit' +author = 'VLMEvalKit Authors' + +# The full version, including alpha/beta/rc tags +version_file = '../../vlmeval/__init__.py' + + +def get_version(): + with open(version_file, 'r') as f: + file_content = f.read() + # Parse the file content into an abstract syntax tree (AST) + tree = ast.parse(file_content, filename=version_file) + + # Iterate through the body of the AST, looking for an assignment to __version__ + for node in tree.body: + if isinstance(node, ast.Assign): + for target in node.targets: + if isinstance(target, ast.Name) and target.id == '__version__': + return node.value.s + raise ValueError('__version__ not found') + + +release = get_version() + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.autosummary', + 'sphinx.ext.intersphinx', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', + 'myst_parser', + 'sphinx_copybutton', + 'sphinx_tabs.tabs', + 'notfound.extension', + 'sphinxcontrib.jquery', + 'sphinx_design', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. 
+# You can specify multiple suffix as a list of string: +# +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +language = 'en' + +# The master toctree document. +root_doc = 'index' +html_context = { + 'github_version': 'latest', +} +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'pytorch_sphinx_theme' +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# yapf: disable +html_theme_options = { + 'menu': [ + { + 'name': 'GitHub', + 'url': 'https://github.com/open-compass/VLMEvalKit' + }, + ], + # Specify the language of shared menu + 'menu_lang': 'en', + # Disable the default edit on GitHub + 'default_edit_on_github': False, +} +# yapf: enable + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] +html_css_files = [ + 'https://cdn.datatables.net/v/bs4/dt-1.12.1/datatables.min.css', + 'css/readthedocs.css' +] +html_js_files = [ + 'https://cdn.datatables.net/v/bs4/dt-1.12.1/datatables.min.js', + 'js/custom.js' +] + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'vlmevalkitdoc' + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (root_doc, 'vlmevalkit.tex', 'VLMEvalKit Documentation', author, + 'manual'), +] + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [(root_doc, 'vlmevalkit', 'VLMEvalKit Documentation', [author], + 1)] + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (root_doc, 'vlmevalkit', 'VLMEvalKit Documentation', author, + 'VLMEvalKit Authors', 'AGI evaluation toolbox and benchmark.', + 'Miscellaneous'), +] + +# -- Options for Epub output ------------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = project + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +# +# epub_identifier = '' + +# A unique identification for the text. 
+# +# epub_uid = '' + +# A list of files that should not be packed into the epub file. +epub_exclude_files = ['search.html'] + +# set priority when building html +StandaloneHTMLBuilder.supported_image_types = [ + 'image/svg+xml', 'image/gif', 'image/png', 'image/jpeg' +] + +# -- Extension configuration ------------------------------------------------- +# Ignore >>> when copying code +copybutton_prompt_text = r'>>> |\.\.\. ' +copybutton_prompt_is_regexp = True + +# Auto-generated header anchors +myst_heading_anchors = 3 +# Enable "colon_fence" extension of myst. +myst_enable_extensions = ['colon_fence', 'dollarmath'] + +# Configuration for intersphinx +intersphinx_mapping = { + 'python': ('https://docs.python.org/3', None), + 'numpy': ('https://numpy.org/doc/stable', None), + 'torch': ('https://pytorch.org/docs/stable/', None), + 'mmengine': ('https://mmengine.readthedocs.io/en/latest/', None), + 'transformers': + ('https://huggingface.co/docs/transformers/main/en/', None), +} +napoleon_custom_sections = [ + # Custom sections for data elements. + ('Meta fields', 'params_style'), + ('Data fields', 'params_style'), +] + +# Disable docstring inheritance +autodoc_inherit_docstrings = False +# Mock some imports during generate API docs. +autodoc_mock_imports = ['rich', 'attr', 'einops'] +# Disable displaying type annotations, these can be very verbose +autodoc_typehints = 'none' + +# The not found page +notfound_template = '404.html' diff --git a/docs/en/docutils.conf b/docs/en/docutils.conf new file mode 100644 index 0000000000000000000000000000000000000000..0c00c84688701117f231fd0c8ec295fb747b7d8f --- /dev/null +++ b/docs/en/docutils.conf @@ -0,0 +1,2 @@ +[html writers] +table_style: colwidths-auto diff --git a/docs/en/index.rst b/docs/en/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..425c7de4de85670f8fd7a64d65fb786a9006f7e1 --- /dev/null +++ b/docs/en/index.rst @@ -0,0 +1,41 @@ +Welcome to the VLMEvalKit Tutorial! +========================================== + +VLMEvalKit Getting Started Guide +------------------------------- + +To help users get started quickly, we recommend the following process: + +- For users who want to use VLMEvalKit, we recommend reading the "Start Your First Step" section to set up the environment and start a mini-experiment to familiarize yourself with the process. + +- If you want to customize more modules, such as adding datasets and models, we provide an "Advanced Tutorial." + +We always welcome users' PRs (Pull Requests) and Issues to improve VLMEvalKit! + +.. _Start Your First Step: +.. toctree:: + :maxdepth: 1 + :caption: Start Your First Step + + Quickstart.md + +.. _Advanced Tutorial: +.. toctree:: + :maxdepth: 1 + :caption: Advanced Tutorial + + Development.md + ConfigSystem.md + +.. _Other Notes: +.. toctree:: + :maxdepth: 1 + :caption: Other Notes + + Contributors.md + +Index and Tables +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/docs/ja/README_ja.md b/docs/ja/README_ja.md new file mode 100644 index 0000000000000000000000000000000000000000..5bf9564b098bec3748712b150d555ef963c400b9 --- /dev/null +++ b/docs/ja/README_ja.md @@ -0,0 +1,117 @@ +
+ +![LOGO](http://opencompass.openxlab.space/utils/MMLB.jpg) + +VLMEvalKit: 大規模視覚言語モデルの評価ツールキット + +[![][github-contributors-shield]][github-contributors-link] • [![][github-forks-shield]][github-forks-link] • [![][github-stars-shield]][github-stars-link] • [![][github-issues-shield]][github-issues-link] • [![][github-license-shield]][github-license-link] + +[English](/README.md) | [简体中文](/docs/zh-CN/README_zh-CN.md) | 日本語 + +🏆 OpenCompass Learderboard • +📊Datasets & Models • +🏗️Quickstart • +🛠️Development • +🎯Goal • +🖊️Citation + +🤗 HF Leaderboard • +🤗 Evaluation Records • +🔊 Discord Channel • +📝 Technical Report +
+ +**VLMEvalKit**(pythonパッケージ名は**vlmeval**)は、**大規模視覚言語モデル(LVLMs)**の**オープンソース評価ツールキット**です。このツールキットは、複数のリポジトリでのデータ準備という重労働なしに、さまざまなベンチマークでLVLMsの**ワンコマンド評価**を可能にします。VLMEvalKitでは、すべてのLVLMsに対して**生成ベースの評価**を採用し、**正確なマッチング**と**LLMベースの回答抽出**の両方で得られた評価結果を提供します。 + +PS: 日本語の README には最新のアップデートがすべて含まれていない場合があります。英語版をご確認ください。 + +## 📊 データセット、モデル、および評価結果 + +**公式のマルチモーダルリーダーボードでのパフォーマンス数値は、ここからダウンロードできます!** + +[**OpenVLM Leaderboard**](https://huggingface.co/spaces/opencompass/open_vlm_leaderboard): [すべての詳細な結果をダウンロード](http://opencompass.openxlab.space/assets/OpenVLM.json)。 + +**Supported Benchmarks** in [**VLMEvalKit Features**](https://aicarrier.feishu.cn/wiki/Qp7wwSzQ9iK1Y6kNUJVcr6zTnPe?table=tblsdEpLieDoCxtb) を確認して、すべてのサポートされているベンチマーク(70以上)を表示してください。 + +**Supported LMMs** in [**VLMEvalKit Features**](https://aicarrier.feishu.cn/wiki/Qp7wwSzQ9iK1Y6kNUJVcr6zTnPe?table=tblsdEpLieDoCxtb) を確認して、すべてのサポートされている LMMs(200以上)を表示してください。 + +**Transformersバージョンの推奨事項:** + +特定のtransformerバージョンで一部のVLMが実行できない可能性があることに注意してください。各VLMを評価するために、以下の設定を推奨します: + +- **`transformers==4.33.0`を使用してください**: `Qwenシリーズ`, `Monkeyシリーズ`, `InternLM-XComposerシリーズ`, `mPLUG-Owl2`, `OpenFlamingo v2`, `IDEFICSシリーズ`, `VisualGLM`, `MMAlaya`, `ShareCaptioner`, `MiniGPT-4シリーズ`, `InstructBLIPシリーズ`, `PandaGPT`, `VXVERSE`, `GLM-4v-9B`. +- **`transformers==4.37.0`を使用してください**: `LLaVAシリーズ`, `ShareGPT4Vシリーズ`, `TransCore-M`, `LLaVA (XTuner)`, `CogVLMシリーズ`, `EMU2シリーズ`, `Yi-VLシリーズ`, `MiniCPM-[V1/V2]`, `OmniLMM-12B`, `DeepSeek-VLシリーズ`, `InternVLシリーズ`, `Cambrianシリーズ`, `VILA-VLシリーズ`. +- **`transformers==4.40.0`を使用してください**: `IDEFICS2`, `Bunny-Llama3`, `MiniCPM-Llama3-V2.5`, `360VL-70B`, `Phi-3-Vision`, `WeMM`. +- **`transformers==4.42.0`を使用してください**: `AKI`. +- **`transformers==latest`を使用してください**: `LLaVA-Nextシリーズ`, `PaliGemma-3B`, `Chameleon-VLシリーズ`, `Video-LLaVA-7B-HF`, `Ovis1.5シリーズ`, `Mantisシリーズ`, `MiniCPM-V2.6`. + +```python +# デモ +from vlmeval.config import supported_VLM +model = supported_VLM['idefics_9b_instruct']() +# 単一画像のフォワード +ret = model.generate(['assets/apple.jpg', 'この画像には何がありますか?']) +print(ret) # この画像には葉がついた赤いリンゴがあります。 +# 複数画像のフォワード +ret = model.generate(['assets/apple.jpg', 'assets/apple.jpg', '提供された画像にはリンゴが何個ありますか?']) +print(ret) # 提供された画像にはリンゴが2個あります。 +``` + +## 🏗️ クイックスタート + +クイックスタートガイドについては、[クイックスタート](/docs/en/Quickstart.md)を参照してください。 + +## 🛠️ 開発ガイド + +カスタムベンチマーク、VLMsを開発するか、単に**VLMEvalKit**に他のコードを貢献する場合は、[開発ガイド](/docs/en/Development.md)を参照してください。 + +コミュニティからの共有を奨励し、それに応じたクレジットを共有するために、次回のレポート更新では以下のことを実施します: + +- 全ての貢献に対して感謝の意を示します +- 新しいモデル、評価セット、または主要な機能への3つ以上の主要な貢献を持つ貢献者は、テクニカルレポートの著者リストに加わることができます。適格な貢献者は、issueを作成するか、または[VLM評価キット ディスコードチャンネル](https://discord.com/invite/evDT4GZmxN)で kennyutc にDMを送ることができます。私たちはそれに応じてフォローアップします。 + +## 🎯 VLMEvalKitの目標 + +**このコードベースは以下を目的として設計されています:** + +1. 研究者や開発者が既存のLVLMsを評価し、評価結果を**簡単に再現できるようにする**ための**使いやすい**、**オープンソースの評価ツールキット**を提供します。 +2. VLMの開発者が自分のモデルを簡単に評価できるようにします。複数のサポートされているベンチマークでVLMを評価するには、単一の`generate_inner()`関数を**実装するだけで**、他のすべてのワークロード(データのダウンロード、データの前処理、予測の推論、メトリックの計算)はコードベースによって処理されます。 + +**このコードベースは以下を目的として設計されていません:** + +1. すべての**第三者ベンチマーク**の元の論文で報告された正確な精度数値を再現すること。その理由は2つあります: + 1. VLMEvalKitは、すべてのVLMに対して**生成ベースの評価**を使用します(オプションで**LLMベースの回答抽出**を使用)。一方、一部のベンチマークは異なるアプローチを使用する場合があります(SEEDBenchはPPLベースの評価を使用します)。これらのベンチマークについては、対応する結果で両方のスコアを比較します。開発者には、コードベースで他の評価パラダイムをサポートすることをお勧めします。 + 2. 
デフォルトでは、すべてのVLMに対して同じプロンプトテンプレートを使用してベンチマークを評価します。一方、**一部のVLMには特定のプロンプトテンプレートがある**場合があります(現時点ではコードベースでカバーされていない場合があります)。VLMの開発者には、現在カバーされていない場合でも、VLMEvalKitで独自のプロンプトテンプレートを実装することをお勧めします。これにより、再現性が向上します。 + +## 🖊️ 引用 + +この作業が役立つ場合は、このリポジトリに**スター🌟**を付けてください。サポートありがとうございます! + +[![Stargazers repo roster for @open-compass/VLMEvalKit](https://reporoster.com/stars/open-compass/VLMEvalKit)](https://github.com/open-compass/VLMEvalKit/stargazers) + +研究でVLMEvalKitを使用する場合、または公開されたオープンソースの評価結果を参照する場合は、以下のBibTeXエントリと、使用した特定のVLM/ベンチマークに対応するBibTexエントリを使用してください。 + +```bib +@misc{duan2024vlmevalkit, + title={VLMEvalKit: An Open-Source Toolkit for Evaluating Large Multi-Modality Models}, + author={Haodong Duan and Junming Yang and Yuxuan Qiao and Xinyu Fang and Lin Chen and Yuan Liu and Xiaoyi Dong and Yuhang Zang and Pan Zhang and Jiaqi Wang and Dahua Lin and Kai Chen}, + year={2024}, + eprint={2407.11691}, + archivePrefix={arXiv}, + primaryClass={cs.CV}, + url={https://arxiv.org/abs/2407.11691}, +} +``` + +

🔝Top に戻る

+ +[github-contributors-link]: https://github.com/open-compass/VLMEvalKit/graphs/contributors +[github-contributors-shield]: https://img.shields.io/github/contributors/open-compass/VLMEvalKit?color=c4f042&labelColor=black&style=flat-square +[github-forks-link]: https://github.com/open-compass/VLMEvalKit/network/members +[github-forks-shield]: https://img.shields.io/github/forks/open-compass/VLMEvalKit?color=8ae8ff&labelColor=black&style=flat-square +[github-issues-link]: https://github.com/open-compass/VLMEvalKit/issues +[github-issues-shield]: https://img.shields.io/github/issues/open-compass/VLMEvalKit?color=ff80eb&labelColor=black&style=flat-square +[github-license-link]: https://github.com/open-compass/VLMEvalKit/blob/main/LICENSE +[github-license-shield]: https://img.shields.io/github/license/open-compass/VLMEvalKit?color=white&labelColor=black&style=flat-square +[github-stars-link]: https://github.com/open-compass/VLMEvalKit/stargazers +[github-stars-shield]: https://img.shields.io/github/stars/open-compass/VLMEvalKit?color=ffcb47&labelColor=black&style=flat-square diff --git a/docs/zh-CN/.readthedocs.yaml b/docs/zh-CN/.readthedocs.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b7e46fe34090d74f165034fb5bed93f2f112f42b --- /dev/null +++ b/docs/zh-CN/.readthedocs.yaml @@ -0,0 +1,17 @@ +version: 2 + +# Set the version of Python and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.8" + +formats: + - epub + +sphinx: + configuration: docs/zh-CN/conf.py + +python: + install: + - requirements: requirements/docs.txt diff --git a/docs/zh-CN/ConfigSystem.md b/docs/zh-CN/ConfigSystem.md new file mode 100644 index 0000000000000000000000000000000000000000..1ba48cef7796adb5a03231eef9f3afba8866184c --- /dev/null +++ b/docs/zh-CN/ConfigSystem.md @@ -0,0 +1,69 @@ + +# 配置系统 + +默认情况下,VLMEvalKit通过在`run.py`脚本中使用`--model`和`--data`参数设置模型名称(在`/vlmeval/config.py`中定义)和数据集名称(在`vlmeval/dataset/__init__.py` 或 `vlmeval/dataset/video_dataset_config.py` 中定义)来启动评估。这种方法在大多数情况下简单且高效,但当用户希望使用不同设置评估多个模型/数据集时,可能不够灵活。 + +为了解决这个问题,VLMEvalKit提供了一个更灵活的配置系统。用户可以在json文件中指定模型和数据集设置,并通过`--config`参数将配置文件的路径传递给`run.py`脚本。以下是一个示例配置json: + +```json +{ + "model": { + "GPT4o_20240806_T00_HIGH": { + "class": "GPT4V", + "model": "gpt-4o-2024-08-06", + "temperature": 0, + "img_detail": "high" + }, + "GPT4o_20240806_T10_Low": { + "class": "GPT4V", + "model": "gpt-4o-2024-08-06", + "temperature": 1.0, + "img_detail": "low" + }, + "GPT4o_20241120": {} + }, + "data": { + "MME-RealWorld-Lite": { + "class": "MMERealWorld", + "dataset": "MME-RealWorld-Lite" + }, + "MMBench_DEV_EN_V11": { + "class": "ImageMCQDataset", + "dataset": "MMBench_DEV_EN_V11" + }, + "MMBench_Video_8frame_nopack":{}, + "Video-MME_16frame_subs": { + "class": "VideoMME", + "dataset": "Video-MME", + "nframe": 16, + "use_subtitle": true + }, + } +} +``` + +配置json的解释: + +1. 现在我们支持两个字段:`model`和`data`,每个字段都是一个字典。字典的键是模型/数据集的名称(由用户设置),值是模型/数据集的设置。 +2. 对于`model`中的项目,值是一个包含以下键的字典: + - `class`:模型的类名,应该是`vlmeval/vlm/__init__.py`(开源模型)或`vlmeval/api/__init__.py`(API模型)中定义的类名。 + - 其他kwargs:其他kwargs是模型特定的参数,请参考模型类的定义以获取详细用法。例如,`model`、`temperature`、`img_detail`是`GPT4V`类的参数。值得注意的是,大多数模型类都需要`model`参数。 + - Tip:在位于`vlmeval/config.py`的变量`supported_VLM`中的已经被定义的模型可以作为`model`的键,而不需要填对应的值即可启动。例如,`GPT4o_20240806_T00_HIGH: {}`是等价于`GPT4o_20240806_T00_HIGH: {'class': 'GPT4V', 'model': 'gpt-4o-2024-08-06', 'temperature': 0, 'img_size': -1, 'img_detail': 'high', 'retry': 10, 'verbose': False}`。 +3. 
对于字典`data`,我们建议用户使用官方数据集名称作为键(或键的一部分),因为我们经常根据数据集名称确定后处理/判断设置。对于`data`中的项目,值是一个包含以下键的字典: + - `class`:数据集的类名,应该是`vlmeval/dataset/__init__.py`中定义的类名。 + - 其他kwargs:其他kwargs是数据集特定的参数,请参考数据集类的定义以获取详细用法。通常,大多数数据集类都需要`dataset`参数。大多数视频数据集类都需要 `nframe` 或 `fps` 参数。 + - Tip:在位于`vlmeval/dataset/video_dataset_config.py`的变量`supported_video_dataset`中的已经被定义的数据集可以作为`data`的键,而不需要填对应的值即可启动。例如,`MMBench_Video_8frame_nopack: {}`是等价于`MMBench_Video_8frame_nopack: {'class': 'MMBenchVideo', 'dataset': 'MMBench-Video', 'nframe': 8, 'pack': False}`。 + +将示例配置json保存为`config.json`,您可以通过以下命令启动评估: + +```bash +python run.py --config config.json +``` + +这将在工作目录`$WORK_DIR`下生成以下输出文件(格式为`{$WORK_DIR}/{$MODEL_NAME}/{$MODEL_NAME}_{$DATASET_NAME}_*`): + +- `$WORK_DIR/GPT4o_20240806_T00_HIGH/GPT4o_20240806_T00_HIGH_MME-RealWorld-Lite*` +- `$WORK_DIR/GPT4o_20240806_T10_Low/GPT4o_20240806_T10_Low_MME-RealWorld-Lite*` +- `$WORK_DIR/GPT4o_20240806_T00_HIGH/GPT4o_20240806_T00_HIGH_MMBench_DEV_EN_V11*` +- `$WORK_DIR/GPT4o_20240806_T10_Low/GPT4o_20240806_T10_Low_MMBench_DEV_EN_V11*` +...... diff --git a/docs/zh-CN/Development.md b/docs/zh-CN/Development.md new file mode 100644 index 0000000000000000000000000000000000000000..69db06498d30354aac0720063589069587a89301 --- /dev/null +++ b/docs/zh-CN/Development.md @@ -0,0 +1,139 @@ +# 🛠️ 如何在 VLMEvalKit 中实现一个新的 Benchmark 或多模态模型(VLM) + +## 实现一个新的 benchmark + +示例 PR: **添加 Math-Vision Benchmark** ([#292](https://github.com/open-compass/VLMEvalKit/pull/292/files)) + +目前在 VLMEvalKit 中,benchmark 以数据集类的形式呈现,当你新增一个 benchmark 时,你可以选择复用现有的数据集类 (如单选题 benchmark 可复用 `ImageMCQDataset`),或是实现新的数据集类。你的数据集类必须支持以下两种方法 (复用父类或自行实现): + +- `build_prompt(self, line)`: 方法输入 `line` 类型为 int (对应数据 index) 或 `pd.Series` (对应数据原始 record)。方法输出一条 `multi-modal message` 作为多模态模型输入,`multi-modal message` 是一个图文交错的列表,如以下格式 (一图一文): `[dict(type='image', value=IMAGE_PTH), dict(type='text', value=prompt)]`。 +- `evaluate(self, eval_file, **judge_kwargs)`: 方法输入 `eval_file` 为多模态模型的预测结果 (多以 `.xlsx` 格式存在),如 benchmark evaluation 需要大语言模型 (一般为 GPT) 辅助,则 `judge_kwargs` 传入大语言模型的参数。方法输出 benchmark 的评测结果,以 `dict` 或 `pd.DataFrame` 的形式。 + +以下,我们简述新增数据集的通常步骤: + +### 1. TSV 数据文件准备 (图文评测集) + +目前,我们将每一个 benchmark 数据集设置为一个单独的 TSV 文件。在推理过程中,数据文件将从数据集定义的 `DATASET_URL` 链接地址自动下载到 `$LMUData` 中(如果没有明确设置的话,默认路径是 `$HOME/LMUData`)。你可以将准备好的 TSV 文件上传到一个可下载的地址(如:huggingface),或发送给我们 ,我们将帮助上传数据集到服务器中。此外,你也可以在环境变量中自定义设置下载路径 `LMUData=/path/to/your/data`。 + +TSV 文件中的内容组成为: + +| 数据集名称 \ 字段 | index | image | image_path | question | hint | multi-choice
options | answer | category | l2-category | split | +| ---------------------- | ----- | ----- | ---------- | -------- | ---- | ----------------------- | ------ | -------- | ----------- | ----- | +| MMBench_DEV_[CN/EN] | ✅ | ✅ | | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | +| MMBench_TEST_[CN/EN] | ✅ | ✅ | | ✅ | ✅ | ✅ | | ✅ | ✅ | ✅ | +| CCBench | ✅ | ✅ | | ✅ | | ✅ | ✅ | ✅ | | | +| SEEDBench_IMG | ✅ | ✅ | | ✅ | | ✅ | ✅ | ✅ | | | +| MME | ✅ | ✅ | | ✅ | | | ✅ | ✅ | | | +| MMVet | ✅ | ✅ | | ✅ | | | ✅ | ✅ | | | +| MMMU_DEV_VAL | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | ✅ | ✅ | ✅ | +| COCO_VAL | ✅ | ✅ | | | | | ✅ | | | | +| OCRVQA_[TEST/TESTCORE] | ✅ | ✅ | | ✅ | | | ✅ | | | | +| TextVQA_VAL | ✅ | ✅ | | ✅ | | | ✅ | | | | +| VCR_[EN/ZH]\_[EASY/HARD]_[ALL/500/100] | ✅ | ✅ | | ✅ | | | ✅ | | | | + +
表 1. 支持的数据集的 TSV 字段。
+ +**TSV 中必须字段的介绍:** + +- **index:** 一个整数,`tsv` 中每一行的唯一标识 +- **image:** 图片的 base64 编码,你可以使用 `vlmeval/smp/vlm.py` 中实现的API进行编码和解码: + - 编码:`encode_image_to_base64`(对于PIL Image)/ `encode_image_file_to_base64`(对于图片文件路径) + - 解码:`decode_base64_to_image`(对于PIL Image)/ `decode_base64_to_image_file`(对于图片文件路径) +- **question:** 针对图像所提取出的问题,类型为字符串 +- **answer:** 问题的答案,类型为字符串,Test 集可缺失这一字段 + +### 2. 自定义数据集的 prompt 构建 + +`ImageBaseDataset` 定义了默认的 prompt 格式。如果需要针对数据集添加 prompt,或给模型输入 `Interleave` 的数据格式,可以通过 `build_prompt(line)` 函数实现。该函数输入为,每次给定 TSV 文件中的一行,包含 index, image, question 等内容作为 line。该函数将返回一个多模态消息 `msg` 的字典列表 `[dict(type='image', value=IMAGE_PTH), dict(type='text', value=prompt)]`,包括图片路径和将被输入到 VLMs 的文本 prompt。对于 interleave 类型输入,可以直接将图片路径的字典放置到 image token 位置。 + +### 3. 自定义数据集的指标实现 + +增加对 benchmark 的评测需要自定义一个该数据集的 class 对象,从而实现数据集的指标计算。图文多模态数据集均继承自 `vlmeval/dataset/image_base.py` 中的 `ImageBaseDataset` 对象。其中 `TYPE` 定义了数据集的类型;`DATASET_URL` 为数据集的下载地址;`DATASET_MD5` 为数据集文件的 md5 一致性编码检查。 + +在 class 中**需要实现** `evaluate(eval_file, **judge_kwargs)` 类函数,对自定义的数据集结果进行指标计算和结果输出。函数输入 `eval_file` 为模型预测结果 `{model_name}_{dataset}.xlsx` 的路径。可以通过 `load(eval_file)` 文件将其读取为 panda.DataFrames 类型,其中包含 index, question, answer, category, prediction 等字段。`judge_kwargs` 参数将传递一个评测相关的字典,如:judge 模型的名称,api 请求线程数等。**函数的返回值**为评估完成的准确度等指标,其格式为由 list 组成的字典,并组织成 panda.DataFrames 类型。 + +## 实现一个新的模型 + +示例 PR: **支持 LLaVA-Next-Interleave** ([#294](https://github.com/open-compass/VLMEvalKit/pull/294)) + +**1. 支持 `generate_inner` API (必须)** + +现有所有的模型都在 `vlmeval/vlm` 中实现。对于一个最基本的模型,你的模型类**应该实现方法** `generate_inner(msgs, dataset=None)`。这个函数将向 VLM 输入一个多模态数据,并返回 VLM 的预测(一个字符串)。可选参数 `dataset` 可以用作模型在不同推理策略之间切换的标志。 + +其中多模态消息 `msgs` 是一个字典列表,每个字典有两个键:类型和值: +- `type`:我们目前支持两种类型,选项是 ["image", "text"]。 +- `value`:当类型为 `text` 时,值是文本消息(一个字符串);当类型为 `image` 时,值可以是图像文件的本地路径,或者是图像的URL。 + +> 目前,一个多模态消息可能包含任意交错的图像和文本。如果你的模型不支持这一点,我们推荐的做法是取第一张图像和连接的文本消息作为模型的输入。你可以在模型的 class 中设置 `INTERLEAVE = False` 并调用 `self.message_to_promptimg(message, dataset=dataset)` 函数来获取你的 prompt 和第一张图片的地址。 + +一些多模态消息的例子: + +```python +IMAGE_PTH = 'assets/apple.jpg' +IMAGE_URL = 'https://raw.githubusercontent.com/open-compass/VLMEvalKit/main/assets/apple.jpg' +msg1 = [ + dict(type='image', value=IMAGE_PTH), + dict(type='text', value='What is in this image?') +] +msg2 = [ + dict(type='image', value=IMAGE_URL), + dict(type='image', value=IMAGE_URL), + dict(type='text', value='How many apples are there in these images?') +] +response = model.generate(msg1) +``` + +为了方便起见,我们还支持接受字符串列表作为输入。在这种情况下,我们将检查一个字符串是图像路径还是图像 URL,并自动将其转换为 `list[dict]` 格式: + +```python +IMAGE_PTH = 'assets/apple.jpg' +IMAGE_URL = 'https://raw.githubusercontent.com/open-compass/VLMEvalKit/main/assets/apple.jpg' +msg1 = [IMAGE_PTH, 'What is in this image?'] +msg2 = [IMAGE_URL, IMAGE_URL, 'How many apples are there in these images?'] +response = model.generate(msg1) +``` + +**2. 支持自定义提示词构建 (可选)** + +此外,你的模型可以通过实现两个可选方法来支持自定义提示构建:`use_custom_prompt(dataset)` 和 `build_prompt(line, dataset=None)`。 + +- `use_custom_prompt(dataset)` 将返回一个布尔值,指示模型是否应使用自定义提示构建策略。 +- 如果`use_custom_prompt(dataset)`返回 True,`build_prompt(line, dataset)` 应该为相应的数据集返回一个自定义构建的多模态消息,line 数据是一个包含数据样本所需信息的字典。如果`use_custom_prompt(dataset)` 返回False,则将使用默认的 prompt 构建策略。 + +**3. 支持多轮对话 (可选)** + +你可以通过支持 `chat_inner(message, dataset)` API 为你的模型新增多轮对话功能并兼容多轮对话评测。这个 API 输出一个字符串型回复,`message` 包含一个聊天记录的列表,格式如下: + +```python +# Assume msg1, msg2, msg3, ... 
are multi-modal messages following the previously described format +# `chat_inner` take the following chat history list as input: +message = [ + dict(role='user', content=msg1), + dict(role='assistant', content=msg2), + dict(role='user', content=msg3), + dict(role='assistant', content=msg4), + ...... + dict(role='user', content=msgn), +] +# `message` should contain an odd number of chat utterances, the role of utterances should be interleaved "user" and "assistant", with the role of the last utterance to be "user". +# The chat function will call `chat_inner` +response = model.chat(message) +``` + +### 示例 PRs: + +- 不支持交错的图像和文本,且不使用自定义提示的VLM:[[模型] 支持 glm-4v-9b](https://github.com/open-compass/VLMEvalKit/pull/221) +- 支持交错的图像和文本及自定义提示的VLM:[添加 MiniCPM-Llama3-V-2.5](https://github.com/open-compass/VLMEvalKit/pull/205) +- VLM API:[特征添加 glmv](https://github.com/open-compass/VLMEvalKit/pull/201) + +## 为 VLMEvalKit 贡献代码 + +如果你想为 **VLMEvalKit** 贡献代码,请在提交PR之前进行预提交检查。这有助于保持代码整洁。 + +```bash +# 在VLMEvalKit的目录下,安装预提交 hook: +pip install pre-commit +pre-commit install +pre-commit run --all-files +# 然后提交你的代码。 +``` diff --git a/docs/zh-CN/EvalByLMDeploy.md b/docs/zh-CN/EvalByLMDeploy.md new file mode 100644 index 0000000000000000000000000000000000000000..cdb46c70f0cc9d2620e0a98471f0c9b354472518 --- /dev/null +++ b/docs/zh-CN/EvalByLMDeploy.md @@ -0,0 +1,28 @@ +# 使用 LMDeploy 加速评测推理 + +VLMEvalKit 支持测试由 LMDeploy 部署的 VLM 模型,下面以 InternVL2-8B 为例,展示如何测试模型 + +## 第0步 安装 LMDeploy + +```bash +pip install lmdeploy +``` + +其他安装方式可以参考 LMDeploy 的[文档](https://github.com/InternLM/lmdeploy) + +## 第1步 启动推理服务 + +```bash +lmdeploy serve api_server OpenGVLab/InternVL2-8B --model-name InternVL2-8B +``` +> [!IMPORTANT] +> 因为 VLMEvalKit 中的模型对于不同数据集在构建 prompt 时可能有自定义行为,如 InternVL2 对于 HallusionBench 的处理,所以,server 端在启动的时候需要指定 `--model-name`,这样在使用 LMDEploy api 时可以根据名字选择合适的 prompt 构建策略。 +> +> 如果指定了 `--server-port`,需要设置对应的环境变量 `LMDEPLOY_API_BASE` + + +## 第2步 评测 + +```bash +python run.py --data MMStar --model InternVL2-8B --verbose --api-nproc 64 +``` diff --git a/docs/zh-CN/Makefile b/docs/zh-CN/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..d4bb2cbb9eddb1bb1b4f366623044af8e4830919 --- /dev/null +++ b/docs/zh-CN/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/zh-CN/Quickstart.md b/docs/zh-CN/Quickstart.md new file mode 100644 index 0000000000000000000000000000000000000000..90cbb793be51e797a44e200a089034a502804cc7 --- /dev/null +++ b/docs/zh-CN/Quickstart.md @@ -0,0 +1,207 @@ +# 快速开始 + +在运行评测脚本之前,你需要先**配置** VLMs,并正确设置模型路径。然后你可以使用脚本 `run.py` 进行多个VLMs和基准测试的推理和评估。 + +## 第0步 安装和设置必要的密钥 + +**安装** + +```bash +git clone https://github.com/open-compass/VLMEvalKit.git +cd VLMEvalKit +pip install -e . 
+``` + +**设置密钥** + +要使用 API 模型(如 GPT-4v, Gemini-Pro-V 等)进行推理,或使用 LLM API 作为**评判者或选择提取器**,你需要首先设置 API 密钥。如果你设置了密钥,VLMEvalKit 将使用一个评判 LLM 从输出中提取答案,否则它将使用**精确匹配模式**(在输出字符串中查找 "Yes", "No", "A", "B", "C"...)。**精确匹配模式只能应用于是或否任务和多项选择任务。** + +- 你可以将所需的密钥放在 `$VLMEvalKit/.env` 中,或直接将它们设置为环境变量。如果你选择创建 `.env` 文件,其内容将如下所示: + + ```bash + # .env 文件,将其放置在 $VLMEvalKit 下 + # 专有 VLMs 的 API 密钥 + # QwenVL APIs + DASHSCOPE_API_KEY= + # Gemini w. Google Cloud Backends + GOOGLE_API_KEY= + # OpenAI API + OPENAI_API_KEY= + OPENAI_API_BASE= + # StepAI API + STEPAI_API_KEY= + # REKA API + REKA_API_KEY= + # GLMV API + GLMV_API_KEY= + # CongRong API + CW_API_BASE= + CW_API_KEY= + # SenseChat-V API + SENSECHAT_AK= + SENSECHAT_SK= + # Hunyuan-Vision API + HUNYUAN_SECRET_KEY= + HUNYUAN_SECRET_ID= + # LMDeploy API + LMDEPLOY_API_BASE= + # 你可以设置一个评估时代理,评估阶段产生的 API 调用将通过这个代理进行 + EVAL_PROXY= + ``` + +- 如果需要使用 API 在对应键值空白处填写上你的密钥。这些 API 密钥将在进行推理和评估时自动加载。 +## 第1步 配置 + +**VLM 配置**:所有 VLMs 都在 `vlmeval/config.py` 中配置。对于某些 VLMs(如 MiniGPT-4、LLaVA-v1-7B),需要额外的配置(在配置文件中配置代码 / 模型权重根目录)。在评估时,你应该使用 `vlmeval/config.py` 中 `supported_VLM` 指定的模型名称来选择 VLM。确保在开始评估之前,你可以成功使用 VLM 进行推理,使用以下命令 `vlmutil check {MODEL_NAME}`。 + +## 第2步 评测 + +**新功能!!!** 我们集成了一个新的配置系统,以实现更灵活的评估设置。查看[文档](/docs/zh-CN/ConfigSystem.md)或运行`python run.py --help`了解更多详情 🔥🔥🔥 + +我们使用 `run.py` 进行评估。你可以使用 `$VLMEvalKit/run.py` 或创建脚本的软链接运行(以便在任何地方使用该脚本): + +**参数** + +- `--data (list[str])`: 设置在 VLMEvalKit 中支持的数据集名称(可以在代码库首页的 README 中找到支持的数据集列表) +- `--model (list[str])`: 设置在 VLMEvalKit 中支持的 VLM 名称(在 `vlmeval/config.py` 中的 `supported_VLM` 中定义) +- `--mode (str, 默认值为 'all', 可选值为 ['all', 'infer'])`:当 mode 设置为 "all" 时,将执行推理和评估;当设置为 "infer" 时,只执行推理 +- `--api-nproc (int, 默认值为 4)`: 调用 API 的线程数 +- `--work-dir (str, default to '.')`: 存放测试结果的目录 + +**用于评测图像多模态评测集的命令** + +你可以使用 `python` 或 `torchrun` 来运行脚本: + +```bash +# 使用 `python` 运行时,只实例化一个 VLM,并且它可能使用多个 GPU。 +# 这推荐用于评估参数量非常大的 VLMs(如 IDEFICS-80B-Instruct)。 + +# 在 MMBench_DEV_EN、MME 和 SEEDBench_IMG 上使用 IDEFICS-80B-Instruct 进行推理和评估 +python run.py --data MMBench_DEV_EN MME SEEDBench_IMG --model idefics_80b_instruct --verbose +# 在 MMBench_DEV_EN、MME 和 SEEDBench_IMG 上使用 IDEFICS-80B-Instruct 仅进行推理 +python run.py --data MMBench_DEV_EN MME SEEDBench_IMG --model idefics_80b_instruct --verbose --mode infer + +# 使用 `torchrun` 运行时,每个 GPU 上实例化一个 VLM 实例。这可以加快推理速度。 +# 但是,这仅适用于消耗少量 GPU 内存的 VLMs。 + +# 在 MMBench_DEV_EN、MME 和 SEEDBench_IMG 上使用 IDEFICS-9B-Instruct、Qwen-VL-Chat、mPLUG-Owl2。在具有 8 个 GPU 的节点上进行推理和评估。 +torchrun --nproc-per-node=8 run.py --data MMBench_DEV_EN MME SEEDBench_IMG --model idefics_80b_instruct qwen_chat mPLUG-Owl2 --verbose +# 在 MME 上使用 Qwen-VL-Chat。在具有 2 个 GPU 的节点上进行推理和评估。 +torchrun --nproc-per-node=2 run.py --data MME --model qwen_chat --verbose +``` + +**用于评测视频多模态评测集的命令** + +```bash +# 使用 `python` 运行时,只实例化一个 VLM,并且它可能使用多个 GPU。 +# 这推荐用于评估参数量非常大的 VLMs(如 IDEFICS-80B-Instruct)。 + +# 在 MMBench-Video 上评测 IDEFCIS2-8B, 视频采样 8 帧作为输入,不采用 pack 模式评测. MMBench_Video_8frame_nopack 是一个定义在 `vlmeval/dataset/video_dataset_config.py` 的数据集设定. 
+torchrun --nproc-per-node=8 run.py --data MMBench_Video_8frame_nopack --model idefics2_8 +# 在 MMBench-Video 上评测 GPT-4o (API 模型), 视频采样每秒一帧作为输入,采用 pack 模式评测 +python run.py --data MMBench_Video_1fps_pack --model GPT4o +``` + +评估结果将作为日志打印出来。此外,**结果文件**也会在目录 `$YOUR_WORKING_DIRECTORY/{model_name}` 中生成。以 `.csv` 结尾的文件包含评估的指标。 +### 常见问题 +#### 构建输入prompt:`build_prompt()`函数 +如果您在评测某个benchmark时,发现模型输出的结果与预期不符,可能是因为您使用的模型没有正确构建输入prompt。 + +在VLMEvalkit中,每个`dataset`类都包含一个名为`build_prompt()`的函数,用于构建输入问题的格式。不同的benchmark可以选择自定义`build_prompt()`函数,也可以使用默认的实现。 + +例如,在处理默认的[多选题/Multi-Choice QA]([vlmeval/dataset/image_mcq.py](https://github.com/open-compass/VLMEvalKit/blob/43af13e052de6805a8b08cd04aed5e0d74f82ff5/vlmeval/dataset/image_mcq.py#L164))时,`ImageMCQDataset.build_prompt()`类会将`hint`、`question`、`options`等元素(若数据集中包含)组合成一个完整的问题格式,如下所示: +``` +HINT +QUESTION +Options: +A. Option A +B. Option B +··· +Please select the correct answer from the options above. +``` + +此外,由于不同模型对评测的需求可能有所不同,VLMEvalkit也支持在模型层面自定义对不同benchmark构建prompt的方法,即`model.build_prompt()`,具体示例可以参考[InternVL](https://github.com/open-compass/VLMEvalKit/blob/43af13e052de6805a8b08cd04aed5e0d74f82ff5/vlmeval/vlm/internvl_chat.py#L324)。 + +**注意:当同时定义了`model.build_prompt()`以及`dataset.build_prompt()`时,`model.build_prompt()`将优先于`dataset.build_prompt()`,即前者会覆盖后者。** + +由于部分模型(如Qwen2VL,InternVL等)对于不同类型的benchmark定义了广泛的prompt构建方法,为了更灵活地适应不同的benchmark,VLMEvalkit支持在模型中自定义`model.use_custom_prompt()`函数。通过添加或者修改`use_custom_prompt()`函数,您可以决定对于哪些benchmark使用模型自定义的`use_custom_prompt()`方法,示例如下: +``` +def use_custom_prompt(self, dataset: str) -> bool: + from vlmeval.dataset import DATASET_TYPE, DATASET_MODALITY + dataset_type = DATASET_TYPE(dataset, default=None) + if not self._use_custom_prompt: + return False + if listinstr(['MMVet'], dataset): + return True + if dataset_type == 'MCQ': + return True + if DATASET_MODALITY(dataset) == 'VIDEO': + return False + return False +``` +仅当`use_custom_prompt()`函数返回`True`时,VLMEvalkit才会对当前benchmark调用模型的`build_prompt()`函数。 +通过这种方式,您可以根据具体需求灵活地控制哪些benchmark使用模型自定义的prompt构建逻辑,从而更好地适配不同模型和任务的需求。 + +#### 模型切分 +对于一些参数量较大的模型,如InternVL2-78B,由于其参数量较大,单个 GPU 可能无法容纳整个模型进行推理。 +在这种情况下,您可以定义环境变量`AUTO_SPLIT=1`,对于支持`split_model()`函数的模型,模型将会自动切分并分配到多个GPU上进行运行。 + +例如,在一台配备 8 块 GPU 的机器上,您可以使用以下命令来运行模型:: +``` +# 对于八卡机器 +AUTO_SPLIT=1 torchrun --nproc-per-node=1 run.py --data MMBench_DEV_EN --model InternVL2-76B --verbose +``` +这会将InternVL2-76B模型切分为 8 份,分别分配到 8 块 GPU 上进行推理。 + +#### 性能差距 +在不同的运行环境中,模型的性能表现可能会有所差异。因此,在评估过程中,您可能会发现自己的评测结果与VLMEvalKit官方榜单上的结果存在差距。这种差异可能与`transformers`, `cuda`, `torch`等版本的变化有关。 + +此外,对于异常的表现,我们建议您优先查看运行完成后的本地生成记录`{model}_{dataset}.xlsx`或者评估记录`{model}_{dataset}_{judge_model}.xlsx`,这可能会帮助您更好地理解评估结果并发现问题。 + + + +### 部署本地语言模型作为评判 / 选择提取器 +上述默认设置使用 OpenAI 的 GPT 作为评判 LLM。你也可以使用 [LMDeploy](https://github.com/InternLM/lmdeploy) 部署本地评判 LLM。 + +首先进行安装: +``` +pip install lmdeploy openai +``` + +然后可以通过一行代码部署本地评判 LLM。LMDeploy 将自动从 Huggingface 下载模型。假设我们使用 internlm2-chat-1_8b 作为评判,端口为 23333,密钥为 sk-123456(密钥必须以 "sk-" 开头,后跟任意数字): +``` +lmdeploy serve api_server internlm/internlm2-chat-1_8b --server-port 23333 +``` + +使用以下 Python 代码获取由 LMDeploy 注册的模型名称: +``` +from openai import OpenAI +client = OpenAI( + api_key='sk-123456', + base_url="http://0.0.0.0:23333/v1" +) +model_name = client.models.list().data[0].id +``` + +配置对应环境变量,以告诉 VLMEvalKit 如何使用本地评判 LLM。正如上面提到的,也可以在 `$VLMEvalKit/.env` 文件中设置: +``` +OPENAI_API_KEY=sk-123456 +OPENAI_API_BASE=http://0.0.0.0:23333/v1/chat/completions +LOCAL_LLM= +``` + +最后,你可以运行第2步中的命令,使用本地评判 LLM 来评估你的 VLM。 + 
+**请注意:** + +- 如果你希望将评判 LLM 部署在单独的一个 GPU 上,并且由于 GPU 内存有限而希望在其他 GPU 上评估你的 VLM,可以使用 `CUDA_VISIBLE_DEVICES=x` 这样的方法,例如: +``` +CUDA_VISIBLE_DEVICES=0 lmdeploy serve api_server internlm/internlm2-chat-1_8b --server-port 23333 +CUDA_VISIBLE_DEVICES=1,2,3 torchrun --nproc-per-node=3 run.py --data HallusionBench --model qwen_chat --verbose +``` +- 如果本地评判 LLM 在遵循指令方面不够好,评估过程可能会失败。请通过 issues 报告此类失败情况。 +- 可以以不同的方式部署评判 LLM,例如使用私有 LLM(而非来自 HuggingFace)或使用量化 LLM。请参考 [LMDeploy doc](https://lmdeploy.readthedocs.io/en/latest/serving/api_server.html) 文档。也可以使用其他支持 OpenAI API 框架的方法。 + +### 使用 LMDeploy 加速模型推理 + +可参考[文档](/docs/zh-CN/EvalByLMDeploy.md) diff --git a/docs/zh-CN/README_zh-CN.md b/docs/zh-CN/README_zh-CN.md new file mode 100644 index 0000000000000000000000000000000000000000..e7165c613b31261806e06a24af5413cf7caf742a --- /dev/null +++ b/docs/zh-CN/README_zh-CN.md @@ -0,0 +1,131 @@ +
+ +![LOGO](http://opencompass.openxlab.space/utils/MMLB.jpg) + +VLMEvalKit: 一种多模态大模型评测工具 + +[![][github-contributors-shield]][github-contributors-link] • [![][github-forks-shield]][github-forks-link] • [![][github-stars-shield]][github-stars-link] • [![][github-issues-shield]][github-issues-link] • [![][github-license-shield]][github-license-link] + +[English](/README.md) | 简体中文 | [日本語](/docs/ja/README_ja.md) + +🏆 OpenCompass 排行榜 • +🏗️ 快速开始 • +📊 数据集和模型 • +🛠️ 开发指南 • +🎯 我们的目标 • +🖊️ 引用 + +🤗 HuggingFace 排行榜 (存档全部性能) • +🤗 原始评测记录 • +🔊 Discord • +📝 技术报告 +
+ +**VLMEvalKit** (python 包名为 **vlmeval**) 是一款专为大型视觉语言模型 (Large Vision-Language Models, LVLMs) 评测而设计的开源工具包。该工具支持在各种基准测试上对大型视觉语言模型进行**一键评估**,无需进行繁重的数据准备工作,让评估过程更加简便。在 VLMEvalKit 中,我们对所有大型视觉语言模型生成的结果进行评测,并提供基于**精确匹配**与基于 **LLM 的答案提取**两种评测结果。 + +## 🆕 更新 +- **[2025-02-20]** 支持新模型:**InternVL2.5 series, QwenVL2.5 series, QVQ-72B, Doubao-VL, Janus-Pro-7B, MiniCPM-o-2.6, InternVL2-MPO, LLaVA-CoT, Hunyuan-Standard-Vision, Ovis2, Valley, SAIL-VL, Ross, Long-VITA, EMU3, SmolVLM**。支持新基准:**MMMU-Pro, WeMath, 3DSRBench, LogicVista, VL-RewardBench, CC-OCR, CG-Bench, CMMMU, WorldSense**。请参考[**VLMEvalKit Features**](https://aicarrier.feishu.cn/wiki/Qp7wwSzQ9iK1Y6kNUJVcr6zTnPe?table=tblsdEpLieDoCxtb)以获取更多信息。感谢社区的各位贡献者 🔥🔥🔥 +- **[2024-11-21]** 集成了一个新的配置系统,以实现更灵活的评估设置。查看[文档](/docs/zh-CN/ConfigSystem.md)或运行`python run.py --help`了解更多详情 🔥🔥🔥 +- **[2024-11-21]** 支持 **[QSpatial](https://andrewliao11.github.io/spatial_prompt/)**,一个用于定量空间推理的多模态基准(例如,确定大小/距离),感谢 **[andrewliao11](https://github.com/andrewliao11)** 提供官方支持 🔥🔥🔥 +- **[2024-11-21]** 支持 **[MM-Math](https://github.com/kge-sun/mm-math)**,一个包含约6K初中多模态推理数学问题的新多模态数学基准。GPT-4o-20240806在该基准上达到了22.5%的准确率 🔥🔥🔥 +- **[2024-11-16]** 支持 **[OlympiadBench](https://github.com/OpenBMB/OlympiadBench)**,一个多模态基准,包含奥林匹克级别的数学和物理问题 🔥🔥🔥 +- **[2024-11-16]** 支持 **[WildVision](https://huggingface.co/datasets/WildVision/wildvision-bench)**,一个基于多模态竞技场数据的主观多模态基准 🔥🔥🔥 +- **[2024-11-13]** 支持 **[MIA-Bench](https://arxiv.org/abs/2407.01509)**,一个多模态指令跟随基准 🔥🔥🔥 +- **[2024-11-08]** 支持 **[Aria](https://arxiv.org/abs/2410.05993)**,一个多模态原生 MoE 模型,感谢 **[teowu](https://github.com/teowu)** 🔥🔥🔥 +- **[2024-11-04]** 支持 **[WorldMedQA-V](https://www.arxiv.org/abs/2410.12722)**,该基准包含 1000 多个医学 VQA 问题,涵盖巴西、以色列、日本、西班牙等四个国家的语言,以及它们的英文翻译 🔥🔥🔥 +- **[2024-11-01]** 支持 `AUTO_SPLIT` 标志 (https://github.com/open-compass/VLMEvalKit/pull/566),用于在低配置 GPU 上进行评估。设置后,模型将自动拆分到多个 GPU(流水线并行)以减少 GPU 内存使用(目前仅支持部分 VLMs:Qwen2-VL、Llama-3.2、LLaVA-OneVision 等) 🔥🔥🔥 +- **[2024-10-30]** 支持评估 **[MLVU](https://github.com/JUNJIE99/MLVU)** 和 **[TempCompass](https://arxiv.org/abs/2403.00476v1)**。这两个基准将很快被纳入 **[OpenVLM 视频排行榜](https://huggingface.co/spaces/opencompass/openvlm_video_leaderboard)** 🔥🔥🔥 + +## 🏗️ 快速开始 + +请参阅[**快速开始**](/docs/zh-CN/Quickstart.md)获取入门指南。 + +## 📊 评测结果,支持的数据集和模型 + +### 评测结果 + +**[OpenVLM Leaderboard](https://huggingface.co/spaces/opencompass/open_vlm_leaderboard)**: **[下载全部细粒度测试结果](http://opencompass.openxlab.space/assets/OpenVLM.json)**. + +请查看[**VLMEvalKit Features**](https://aicarrier.feishu.cn/wiki/Qp7wwSzQ9iK1Y6kNUJVcr6zTnPe?table=tblsdEpLieDoCxtb)中的 **Supported Benchmarks** 标签,以查看所有支持的图像和视频基准(70+)。 + +请查看[**VLMEvalKit Features**](https://aicarrier.feishu.cn/wiki/Qp7wwSzQ9iK1Y6kNUJVcr6zTnPe?table=tblsdEpLieDoCxtb)中的 **Supported LMMs** 标签,以查看所有支持的 LMMs,包括商业 API、开源模型等(200+)。 + +### 其他 + +**Transformers 的版本推荐:** + +**请注意**,某些 VLM 可能无法在某些特定的 transformers 版本下运行,我们建议使用以下设置来评估对应的VLM: + +- **请用** `transformers==4.33.0` **来运行**: `Qwen series`, `Monkey series`, `InternLM-XComposer Series`, `mPLUG-Owl2`, `OpenFlamingo v2`, `IDEFICS series`, `VisualGLM`, `MMAlaya`, `ShareCaptioner`, `MiniGPT-4 series`, `InstructBLIP series`, `PandaGPT`, `VXVERSE`. +- **请用** `transformers==4.37.0 ` **来运行**: `LLaVA series`, `ShareGPT4V series`, `TransCore-M`, `LLaVA (XTuner)`, `CogVLM Series`, `EMU2 Series`, `Yi-VL Series`, `MiniCPM-[V1/V2]`, `OmniLMM-12B`, `DeepSeek-VL series`, `InternVL series`, `Cambrian Series`, `VILA Series`, `Llama-3-MixSenseV1_1`, `Parrot-7B`, `PLLaVA Series`. 
+- **请用** `transformers==4.40.0 ` **来运行**: `IDEFICS2`, `Bunny-Llama3`, `MiniCPM-Llama3-V2.5`, `360VL-70B`, `Phi-3-Vision`, `WeMM`. +- **请用** `transformers==4.42.0 ` **来运行**: `AKI`. +- **请用** `transformers==latest` **来运行**: `LLaVA-Next series`, `PaliGemma-3B`, `Chameleon series`, `Video-LLaVA-7B-HF`, `Ovis series`, `Mantis series`, `MiniCPM-V2.6`, `OmChat-v2.0-13B-sinlge-beta`, `Idefics-3`, `GLM-4v-9B`, `VideoChat2-HD`. + +**如何测试一个 VLM 是否可以正常运行:** + +```python +from vlmeval.config import supported_VLM +model = supported_VLM['idefics_9b_instruct']() +# 前向单张图片 +ret = model.generate(['assets/apple.jpg', 'What is in this image?']) +print(ret) # 这张图片上有一个带叶子的红苹果 +# 前向多张图片 +ret = model.generate(['assets/apple.jpg', 'assets/apple.jpg', 'How many apples are there in the provided images? ']) +print(ret) # 提供的图片中有两个苹果 +``` + +## 🛠️ 开发指南 + +要开发自定义评测数据集,支持其他 VLMs,或为 VLMEvalKit 贡献代码,请参阅[**开发指南**](/docs/zh-CN/Development_zh-CN.md)。 + +为激励来自社区的共享并分享相应的 credit,在下一次 report 更新中,我们将: + +- 致谢所有的 contribution +- 具备三个或以上主要贡献 (支持新模型、评测集、或是主要特性) 的贡献者将可以加入技术报告的作者列表 。合条件的贡献者可以创建 issue 或是在 [VLMEvalKit Discord Channel](https://discord.com/invite/evDT4GZmxN) 私信 kennyutc,我们将进行跟进 + +## 🎯 VLMEvalKit 的目标 + +**该代码库的设计目标是:** + +1. 提供一个**易于使用**的**开源评估工具包**,方便研究人员和开发人员评测现有的多模态大模型,并使评测结果**易于复现**。 +2. 使 VLM 开发人员能够轻松地评测自己的模型。在多个支持的基准测试上评估 VLM,只需实现一个 `generate_inner()` 函数,所有其他工作负载(数据下载、数据预处理、预测推理、度量计算)都由代码库处理。 + +**该代码库的设计目标不是:** + +复现所有**第三方基准测试**原始论文中报告的准确数字。有两个相关的原因: +1. VLMEvalKit 对所有 VLMs 使用基于生成的评估(可选使用基于 LLM 的答案提取)。同时,一些基准测试可能官方使用不同的方法(*例如,SEEDBench 使用基于 PPL 的评估*)。对于这些基准测试,我们在相应的结果中比较两个得分。我们鼓励开发人员在代码库中支持其他评估范式。 +2. 默认情况下,我们对所有多模态模型使用相同的提示模板来评估基准测试。同时,**一些多模态模型可能有他们特定的提示模板**(目前可能未在代码库中涵盖)。我们鼓励 VLM 的开发人员在 VLMEvalKit 中实现自己的提示模板,如果目前未覆盖。这将有助于提高可复现性。 + +## 🖊️ 引用 + +如果我们的工作对您有所帮助,请考虑 **star🌟** VLMEvalKit。感谢支持! + +[![Stargazers repo roster for @open-compass/VLMEvalKit](https://reporoster.com/stars/open-compass/VLMEvalKit)](https://github.com/open-compass/VLMEvalKit/stargazers) + +如果您在研究中使用了 VLMEvalKit,或希望参考已发布的开源评估结果,请使用以下 BibTeX 条目以及与您使用的特定 VLM / 基准测试相对应的 BibTex 条目。 + +```bib +@misc{duan2024vlmevalkit, + title={VLMEvalKit: An Open-Source Toolkit for Evaluating Large Multi-Modality Models}, + author={Haodong Duan and Junming Yang and Yuxuan Qiao and Xinyu Fang and Lin Chen and Yuan Liu and Xiaoyi Dong and Yuhang Zang and Pan Zhang and Jiaqi Wang and Dahua Lin and Kai Chen}, + year={2024}, + eprint={2407.11691}, + archivePrefix={arXiv}, + primaryClass={cs.CV}, + url={https://arxiv.org/abs/2407.11691}, +} +``` + +
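上文"目标"部分提到,接入新的 VLM 只需实现一个 `generate_inner()` 函数。下面给出一个极简的示意草图(仅供说明:`MyNewModel` 及其内部的占位推理逻辑均为假设示例,`BaseModel` 的具体接口与 message 格式请以[开发指南](/docs/zh-CN/Development_zh-CN.md)为准):

```python
from vlmeval.vlm.base import BaseModel


class MyNewModel(BaseModel):
    """示意性的自定义 VLM 封装:只需把自己的推理逻辑放进 generate_inner()。"""

    def generate_inner(self, message, dataset=None):
        # message 是一个 dict 列表,例如:
        # [{'type': 'image', 'value': 'assets/apple.jpg'},
        #  {'type': 'text', 'value': 'What is in this image?'}]
        images = [x['value'] for x in message if x['type'] == 'image']
        prompt = '\n'.join(x['value'] for x in message if x['type'] == 'text')
        # 在这里调用你自己的模型完成推理并返回纯文本回答(此处以占位字符串代替);
        # 数据下载、预处理、推理调度与指标计算均由 VLMEvalKit 负责。
        return f'placeholder answer for {len(images)} image(s): {prompt[:32]}'
```

实现后通常还需在 `vlmeval/config.py` 的 `supported_VLM` 中注册模型名称,之后即可像上文示例那样按名称加载并评测。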

🔝回到顶部
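除了一键评估,也可以在 Python 中直接对已有的预测结果文件重新打分(即上文提到的"基于 LLM 的答案提取"流程)。下面是一个简化示意,接口用法与本仓库的 `eval_only.py` 一致,但其中的数据集名称、judge 模型与文件路径均为假设示例:

```python
from vlmeval.smp import load_env
from vlmeval.dataset import build_dataset

load_env()  # 从 .env 读取 judge 模型所需的 API 配置

# 数据集名称仅为示例,可替换为任一受支持的 benchmark
dataset = build_dataset('MathVista_MINI')

# judge_kwargs 指定用于答案提取 / 判分的 LLM 及并发参数(模型名为假设示例)
judge_kwargs = {'model': 'gpt-4o-mini', 'nproc': 16, 'verbose': True, 'retry': 10}

# prediction_file 指向一次推理产生的结果文件(路径为假设示例)
prediction_file = 'outputs/MyNewModel/MyNewModel_MathVista_MINI.xlsx'
dataset.evaluate(prediction_file, **judge_kwargs)
```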

+ +[github-contributors-link]: https://github.com/open-compass/VLMEvalKit/graphs/contributors +[github-contributors-shield]: https://img.shields.io/github/contributors/open-compass/VLMEvalKit?color=c4f042&labelColor=black&style=flat-square +[github-forks-link]: https://github.com/open-compass/VLMEvalKit/network/members +[github-forks-shield]: https://img.shields.io/github/forks/open-compass/VLMEvalKit?color=8ae8ff&labelColor=black&style=flat-square +[github-issues-link]: https://github.com/open-compass/VLMEvalKit/issues +[github-issues-shield]: https://img.shields.io/github/issues/open-compass/VLMEvalKit?color=ff80eb&labelColor=black&style=flat-square +[github-license-link]: https://github.com/open-compass/VLMEvalKit/blob/main/LICENSE +[github-license-shield]: https://img.shields.io/github/license/open-compass/VLMEvalKit?color=white&labelColor=black&style=flat-square +[github-stars-link]: https://github.com/open-compass/VLMEvalKit/stargazers +[github-stars-shield]: https://img.shields.io/github/stars/open-compass/VLMEvalKit?color=ffcb47&labelColor=black&style=flat-square diff --git a/docs/zh-CN/_static/css/readthedocs.css b/docs/zh-CN/_static/css/readthedocs.css new file mode 100644 index 0000000000000000000000000000000000000000..c83beffd261d9d7cb79dc499aec7187474639d89 --- /dev/null +++ b/docs/zh-CN/_static/css/readthedocs.css @@ -0,0 +1,63 @@ +.header-logo { + background-image: url("../image/logo.svg"); + background-size: 275px 80px; + height: 80px; + width: 275px; +} + + +@media screen and (min-width: 1100px) { + .header-logo { + top: -25px; + } +} + +pre { + white-space: pre; +} + +@media screen and (min-width: 2000px) { + .pytorch-content-left { + width: 1200px; + margin-left: 30px; + } + article.pytorch-article { + max-width: 1200px; + } + .pytorch-breadcrumbs-wrapper { + width: 1200px; + } + .pytorch-right-menu.scrolling-fixed { + position: fixed; + top: 45px; + left: 1580px; + } +} + + +article.pytorch-article section code { + padding: .2em .4em; + background-color: #f3f4f7; + border-radius: 5px; +} + +/* Disable the change in tables */ +article.pytorch-article section table code { + padding: unset; + background-color: unset; + border-radius: unset; +} + +table.autosummary td { + width: 50% +} + +img.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +article.pytorch-article p.rubric { + font-weight: bold; +} diff --git a/docs/zh-CN/_static/image/logo.svg b/docs/zh-CN/_static/image/logo.svg new file mode 100644 index 0000000000000000000000000000000000000000..043530572afb48d0eac26b4b53d448aae6e9a9af --- /dev/null +++ b/docs/zh-CN/_static/image/logo.svg @@ -0,0 +1,24 @@ + + + +Created with Fabric.js 5.3.0 + + + + + + + + + + + + + VLMEvalKit + diff --git a/docs/zh-CN/_static/image/logo_icon.svg b/docs/zh-CN/_static/image/logo_icon.svg new file mode 100644 index 0000000000000000000000000000000000000000..c46dd3b5407c1f82dce4f6096acf8c8a30a6cfba --- /dev/null +++ b/docs/zh-CN/_static/image/logo_icon.svg @@ -0,0 +1,31 @@ + + + + + + + + + + + + + + + + + + + diff --git a/docs/zh-CN/_static/js/custom.js b/docs/zh-CN/_static/js/custom.js new file mode 100644 index 0000000000000000000000000000000000000000..84da69d47fae8e8994685aca3b99151d01a77978 --- /dev/null +++ b/docs/zh-CN/_static/js/custom.js @@ -0,0 +1,10 @@ +var collapsedSections = []; + +$(document).ready(function () { + $('.model-summary').DataTable({ + "stateSave": false, + "lengthChange": false, + "pageLength": 20, + "order": [] + }); +}); diff --git a/docs/zh-CN/_templates/404.html 
b/docs/zh-CN/_templates/404.html new file mode 100644 index 0000000000000000000000000000000000000000..64910175d5d69946845b04d5e6a378de205e8388 --- /dev/null +++ b/docs/zh-CN/_templates/404.html @@ -0,0 +1,18 @@ +{% extends "layout.html" %} + +{% block body %} + +

+<h1>Page Not Found</h1>
+
+<p>
+  The page you are looking for cannot be found.
+</p>
+
+<p>
+  If you just switched documentation versions, it is likely that the page you were on has been moved. You can look
+  for it in the content table on the left, or go to the homepage.
+</p>

+ + +{% endblock %} diff --git a/docs/zh-CN/_templates/autosummary/class.rst b/docs/zh-CN/_templates/autosummary/class.rst new file mode 100644 index 0000000000000000000000000000000000000000..4c3a7a9abf5c5b14ac3ef3b00a2f070480295358 --- /dev/null +++ b/docs/zh-CN/_templates/autosummary/class.rst @@ -0,0 +1,13 @@ +.. role:: hidden + :class: hidden-section +.. currentmodule:: {{ module }} + + +{{ name | underline}} + +.. autoclass:: {{ name }} + :members: + +.. + autogenerated from _templates/autosummary/class.rst + note it does not have :inherited-members: diff --git a/docs/zh-CN/_templates/callable.rst b/docs/zh-CN/_templates/callable.rst new file mode 100644 index 0000000000000000000000000000000000000000..3a7b9d2b96c76dfa3eb1d8bef56f58f219fe7760 --- /dev/null +++ b/docs/zh-CN/_templates/callable.rst @@ -0,0 +1,14 @@ +.. role:: hidden + :class: hidden-section +.. currentmodule:: {{ module }} + + +{{ name | underline}} + +.. autoclass:: {{ name }} + :members: + :special-members: __call__ + +.. + autogenerated from _templates/callable.rst + note it does not have :inherited-members: diff --git a/docs/zh-CN/conf.py b/docs/zh-CN/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..689daa6177913b918b6a01fe1e1ce5a6d4ca505f --- /dev/null +++ b/docs/zh-CN/conf.py @@ -0,0 +1,242 @@ +# flake8: noqa +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import ast +import subprocess +import sys + +import pytorch_sphinx_theme +from sphinx.builders.html import StandaloneHTMLBuilder + +sys.path.insert(0, os.path.abspath('../../')) + +# -- Project information ----------------------------------------------------- + +project = 'VLMEvalKit' +copyright = '2023, VLMEvalKit' +author = 'VLMEvalKit Authors' + +# The full version, including alpha/beta/rc tags +version_file = '../../vlmeval/__init__.py' + + +def get_version(): + with open(version_file, 'r') as f: + file_content = f.read() + # Parse the file content into an abstract syntax tree (AST) + tree = ast.parse(file_content, filename=version_file) + + # Iterate through the body of the AST, looking for an assignment to __version__ + for node in tree.body: + if isinstance(node, ast.Assign): + for target in node.targets: + if isinstance(target, ast.Name) and target.id == '__version__': + return node.value.s + raise ValueError('__version__ not found') + + +release = get_version() + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.autosummary', + 'sphinx.ext.intersphinx', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', + 'myst_parser', + 'sphinx_copybutton', + 'sphinx_tabs.tabs', + 'notfound.extension', + 'sphinxcontrib.jquery', + 'sphinx_design', +] + +# Add any paths that contain templates here, relative to this directory. 
+templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +language = 'cn' + +# The master toctree document. +root_doc = 'index' +html_context = { + 'github_version': 'latest', +} +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'pytorch_sphinx_theme' +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# yapf: disable +html_theme_options = { + 'menu': [ + { + 'name': 'GitHub', + 'url': 'https://github.com/open-compass/VLMEvalKit' + }, + ], + # Specify the language of shared menu + 'menu_lang': 'cn', + # Disable the default edit on GitHub + 'default_edit_on_github': False, +} +# yapf: enable + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] +html_css_files = [ + 'https://cdn.datatables.net/v/bs4/dt-1.12.1/datatables.min.css', + 'css/readthedocs.css' +] +html_js_files = [ + 'https://cdn.datatables.net/v/bs4/dt-1.12.1/datatables.min.js', + 'js/custom.js' +] + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'vlmevalkitdoc' + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (root_doc, 'vlmevalkit.tex', 'VLMEvalKit Documentation', author, + 'manual'), +] + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [(root_doc, 'vlmevalkit', 'VLMEvalKit Documentation', [author], + 1)] + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (root_doc, 'vlmevalkit', 'VLMEvalKit Documentation', author, + 'VLMEvalKit Authors', 'AGI evaluation toolbox and benchmark.', + 'Miscellaneous'), +] + +# -- Options for Epub output ------------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = project + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. 
+# +# epub_identifier = '' + +# A unique identification for the text. +# +# epub_uid = '' + +# A list of files that should not be packed into the epub file. +epub_exclude_files = ['search.html'] + +# set priority when building html +StandaloneHTMLBuilder.supported_image_types = [ + 'image/svg+xml', 'image/gif', 'image/png', 'image/jpeg' +] + +# -- Extension configuration ------------------------------------------------- +# Ignore >>> when copying code +copybutton_prompt_text = r'>>> |\.\.\. ' +copybutton_prompt_is_regexp = True + +# Auto-generated header anchors +myst_heading_anchors = 3 +# Enable "colon_fence" extension of myst. +myst_enable_extensions = ['colon_fence', 'dollarmath'] + +# Configuration for intersphinx +intersphinx_mapping = { + 'python': ('https://docs.python.org/3', None), + 'numpy': ('https://numpy.org/doc/stable', None), + 'torch': ('https://pytorch.org/docs/stable/', None), + 'mmengine': ('https://mmengine.readthedocs.io/en/latest/', None), + 'transformers': + ('https://huggingface.co/docs/transformers/main/en/', None), +} +napoleon_custom_sections = [ + # Custom sections for data elements. + ('Meta fields', 'params_style'), + ('Data fields', 'params_style'), +] + +# Disable docstring inheritance +autodoc_inherit_docstrings = False +# Mock some imports during generate API docs. +autodoc_mock_imports = ['rich', 'attr', 'einops'] +# Disable displaying type annotations, these can be very verbose +autodoc_typehints = 'none' + +# The not found page +notfound_template = '404.html' + + +def builder_inited_handler(app): + subprocess.run(['./cp_origin_docs.sh']) + + +def setup(app): + app.connect('builder-inited', builder_inited_handler) diff --git a/docs/zh-CN/cp_origin_docs.sh b/docs/zh-CN/cp_origin_docs.sh new file mode 100644 index 0000000000000000000000000000000000000000..1e728323684a0aad1571eb392871d6c5de6644fc --- /dev/null +++ b/docs/zh-CN/cp_origin_docs.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +# Copy *.md files from docs/ if it doesn't have a Chinese translation + +for filename in $(find ../en/ -name '*.md' -printf "%P\n"); +do + mkdir -p $(dirname $filename) + cp -n ../en/$filename ./$filename +done diff --git a/docs/zh-CN/docutils.conf b/docs/zh-CN/docutils.conf new file mode 100644 index 0000000000000000000000000000000000000000..0c00c84688701117f231fd0c8ec295fb747b7d8f --- /dev/null +++ b/docs/zh-CN/docutils.conf @@ -0,0 +1,2 @@ +[html writers] +table_style: colwidths-auto diff --git a/docs/zh-CN/index.rst b/docs/zh-CN/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..5147a23b2052044664a987910d4bf04ccf286d14 --- /dev/null +++ b/docs/zh-CN/index.rst @@ -0,0 +1,49 @@ +欢迎来到 VLMEvalKit 中文教程! +========================================== + +VLMEvalKit 上手路线 +------------------------------- + +为了用户能够快速上手,我们推荐以下流程: + +- 对于想要使用 VLMEvalKit 的用户,我们推荐先阅读 开始你的第一步_ 部分来设置环境,并启动一个迷你实验熟悉流程。 + +- 若您想进行更多模块的自定义,例如增加数据集和模型,我们提供了 进阶教程_ 。 + +我们始终非常欢迎用户的 PRs 和 Issues 来完善 VLMEvalKit! + +.. _快速开始: +.. toctree:: + :maxdepth: 1 + :caption: 快速开始 + + Quickstart.md + + +.. .. _教程: +.. .. toctree:: +.. :maxdepth: 1 +.. :caption: 教程 + +.. user_guides/framework_overview.md + +.. _进阶教程: +.. toctree:: + :maxdepth: 1 + :caption: 进阶教程 + + Development.md + ConfigSystem.md + +.. .. _其他说明: +.. .. toctree:: +.. :maxdepth: 1 +.. :caption: 其他说明 + +.. 
notes/contribution_guide.md + +索引与表格 +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/eval_only.py b/eval_only.py new file mode 100644 index 0000000000000000000000000000000000000000..7777619cd74c2441acc17893600f71a43b80ff31 --- /dev/null +++ b/eval_only.py @@ -0,0 +1,52 @@ +from vlmeval.dataset import build_dataset +from vlmeval.smp import * + + +load_env() +# dataset_name = "MMMU_DEV_VAL" +# dataset_name = "MathVista_MINI" +dataset_name = "DynaMath" +dataset = build_dataset(dataset_name) +judge_kwargs = { + 'nproc': 16, + 'verbose': True, + 'retry': 10, +} +if dataset.TYPE in ['MCQ', 'Y/N', 'MCQ_MMMU_Pro'] or listinstr(['moviechat1k'], dataset_name.lower()): + if listinstr(['WeMath'], dataset_name): + judge_kwargs['model'] = 'gpt-4o-mini' + else: + judge_kwargs['model'] = 'chatgpt-0125' +elif listinstr(['MMVet', 'LLaVABench', 'MMBench_Video'], dataset_name): + judge_kwargs['model'] = 'gpt-4-turbo' +elif listinstr(['MathVista', 'MathVerse', 'MathVision', 'DynaMath', 'VL-RewardBench', 'LogicVista', 'MOAT'], dataset_name): # noqa: E501 + judge_kwargs['model'] = 'gpt-4o-mini' +elif listinstr(['MMLongBench', 'MMDU', 'DUDE', 'SLIDEVQA', 'MIA-Bench', 'WildVision', 'MMAlignBench'], dataset_name): # noqa: E501 + judge_kwargs['model'] = 'gpt-4o' + +fs = [ + "/user/konglingyu/VLMEvalKit/public_eval/grpo_v7_exp0_qwen25vl_scalable_rl_opensource_math_grpo_bs96_wofilter_scoreB_std_filter_0523_global_step_200/DynaMath_train_prompt_greedy/20250524/grpo_v7_exp0_qwen25vl_scalable_rl_opensource_math_grpo_bs96_wofilter_scoreB_std_filter_0523_global_step_200/T20250524_G/grpo_v7_exp0_qwen25vl_scalable_rl_opensource_math_grpo_bs96_wofilter_scoreB_std_filter_0523_global_step_200_DynaMath.xlsx" + # "/user/konglingyu/VLMEvalKit/outputs/Qwen2.5-VL-7B-Instruct-original/T20250412_G/Qwen2.5-VL-7B-Instruct-original_DynaMath.xlsx", + # "/user/konglingyu/VLMEvalKit/outputs/Qwen2.5-VL-7B-RL-greedy/T20250414_G/Qwen2.5-VL-7B-RL-greedy_DynaMath.xlsx", + # "/user/konglingyu/VLMEvalKit/public_eval/bbox_step_300/DynaMath/20250418/bbox_step_300/T20250418_G/bbox_step_300_DynaMath.xlsx", + # "/user/konglingyu/VLMEvalKit/public_eval/clip_high_step_600/DynaMath/20250419/clip_high_step_600/T20250419_G/clip_high_step_600_DynaMath.xlsx", + # "/user/konglingyu/VLMEvalKit/public_eval/dr_grpo_step_600/DynaMath/20250419/dr_grpo_step_600/T20250419_G/dr_grpo_step_600_DynaMath.xlsx", + # "/user/konglingyu/VLMEvalKit/public_eval/dr_grpo_step_800/DynaMath/20250418/dr_grpo_step_800/T20250418_G/dr_grpo_step_800_DynaMath.xlsx", + # "/user/konglingyu/VLMEvalKit/public_eval/grpo_v7_exp9_qwen25vl_grpo_opensource_math_doc_dr_grpo_500/DynaMath/20250417/grpo_v7_exp9_qwen25vl_grpo_opensource_math_doc_dr_grpo/T20250417_G/grpo_v7_exp9_qwen25vl_grpo_opensource_math_doc_dr_grpo_DynaMath.xlsx", + # "/user/konglingyu/VLMEvalKit/public_eval/naive_grpo_step_400/DynaMath/20250418/naive_grpo_step_400/T20250418_G/naive_grpo_step_400_DynaMath.xlsx", +] +# file = "/user/konglingyu/VLMEvalKit/public_eval/bbox_step_300/DynaMath/20250418/bbox_step_300/T20250418_G/bbox_step_300_DynaMath.xlsx" +for file in fs: + try: + os.remove(file.replace(".xlsx", "_gpt-4o-mini_score.csv")) + os.remove(file.replace(".xlsx", "_gpt-4o-mini.pkl")) + os.remove(file.replace(".xlsx", "_gpt-4o-mini.xlsx")) + print("Removed old files") + except: + pass + dataset.evaluate(file, **judge_kwargs) + with open(file.replace(".xlsx", "_gpt-4o-mini_score.csv")) as f: + lines = f.readlines() + print(f"File: {file.split('/')[-1]}") + for line in lines: + print(line.strip()) \ No 
newline at end of file diff --git a/math_utils/__init__.py b/math_utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ee173e06e909efa130f556b90583cf70f3d7b900 --- /dev/null +++ b/math_utils/__init__.py @@ -0,0 +1,498 @@ +""" +Answer checker API that uses sympy to simplify expressions and check for equality. + +Call grade_answer(given_answer: str, ground_truth: str). + +FROM: https://github.com/openai/prm800k/blob/main/prm800k/grading/grader.py +""" +import re +import sympy +import copy as cp +import string +from pylatexenc import latex2text +from sympy.parsing import sympy_parser +# import sys +# sys.path.append('/data/xuqixin/tablefactory/verl/utils/reward_score/') +from . import math_normalize +from .grader import math_equal +# import math_normalize +# from grader import math_equal + +# sympy might hang -- we don't care about trying to be lenient in these cases +BAD_SUBSTRINGS = ["^{", "^("] +BAD_REGEXES = ["\^[0-9]+\^", "\^[0-9][0-9]+"] +TUPLE_CHARS = "()[]" + +def list_to_dict(lst): + return {chr(65 + i): val for i, val in enumerate(lst)} + +def can_infer(answer, choices): + answer = str(answer) + copt = can_infer_option(answer, choices) + if copt: + return choices[copt] + else: + return answer # 选项的内容 + +def can_infer_option(answer, choices): + # Choices is a dictionary + if 'Failed to obtain answer via API' in answer: + return False + + reject_to_answer = [ + "Sorry, I can't help with images of people yet.", + "I can't process this file.", + "I'm sorry, but without the image provided", + 'Cannot determine the answer' + ] + for err in reject_to_answer: + if err in answer: + return 'Z' + + def count_choice(splits, choices, prefix='', suffix=''): + cnt = 0 + for c in choices: + if prefix + c + suffix in splits: + cnt += 1 + return cnt + + answer_mod = cp.copy(answer) + chars = '.()[],:;!*#{}' + for c in chars: + answer_mod = answer_mod.replace(c, ' ') + + splits = [x.strip() for x in answer_mod.split()] + count = count_choice(splits, choices) + + if count == 1: + for ch in choices: + if 'A' in splits and len(splits) > 3: + return False + if ch in splits: + return ch + elif count == 0 and count_choice(splits, {'Z', ''}) == 1: + return False + return False + + +def can_infer_text(answer, choices): + answer = answer.lower() + _, answer = match_answer(answer) + assert isinstance(choices, dict) + for k in choices: + assert k in string.ascii_uppercase # pip install string + choices[k] = str(choices[k]).lower() + cands = [] + for k in choices: + if choices[k] in answer or grade_answer(answer, choices[k]): + cands.append(choices[k]) + if len(cands) == 1: + return cands[0] + return False + +def _sympy_parse(expr: str): + """Parses an expression with sympy.""" + py_expr = expr.replace("^", "**") + return sympy_parser.parse_expr( + py_expr, + transformations=( + sympy_parser.standard_transformations + + (sympy_parser.implicit_multiplication_application,) + ), + ) + + +def _parse_latex(expr: str) -> str: + """Attempts to parse latex to an expression sympy can read.""" + expr = expr.replace("\\tfrac", "\\frac") + expr = expr.replace("\\dfrac", "\\frac") + expr = expr.replace("\\frac", " \\frac") # Play nice with mixed numbers. + expr = latex2text.LatexNodes2Text().latex_to_text(expr) + + # Replace the specific characters that this parser uses. 
+ expr = expr.replace("√", "sqrt") + expr = expr.replace("\sqrt", "sqrt") + expr = expr.replace("π", "pi") + expr = expr.replace("∞", "inf") + expr = expr.replace("∪", "U") + expr = expr.replace("·", "*") + expr = expr.replace("×", "*") + + return expr.strip() + + +def _is_float(num: str) -> bool: + try: + float(num) + return True + except ValueError: + return False + + +def _is_int(x: float) -> bool: + try: + return abs(x - int(round(x))) <= 1e-7 + except: + return False + + +def _is_frac(expr: str) -> bool: + return bool(re.search(r"^-?[0-9]+.?/0*[1-9][0-9]*.?$", expr)) + + +def _str_is_int(x: str) -> bool: + try: + x = _strip_properly_formatted_commas(x) + x = float(x) + return abs(x - int(round(x))) <= 1e-7 + except: + return False + + +def _str_to_int(x: str) -> bool: + x = x.replace(",", "") + x = float(x) + return int(x) + + +def _inject_implicit_mixed_number(step: str): + """ + Automatically make a mixed number evalable + e.g. 7 3/4 => 7+3/4 + """ + p1 = re.compile("([0-9]) +([0-9])") + step = p1.sub("\\1+\\2", step) ## implicit mults + return step + + +def _strip_properly_formatted_commas(expr: str): + # We want to be careful because we don't want to strip tuple commas + p1 = re.compile("(\d)(,)(\d\d\d)($|\D)") + while True: + next_expr = p1.sub("\\1\\3\\4", expr) + if next_expr == expr: + break + expr = next_expr + return next_expr + + +def _normalize(expr: str) -> str: + """Normalize answer expressions.""" + if expr is None: + return None + + # Remove enclosing `\text{}`. + m = re.search("^\\\\text\{(?P.+?)\}$", expr) + if m is not None: + expr = m.group("text") + + expr = expr.replace("\\%", "%") + expr = expr.replace("\\$", "$") + expr = expr.replace("$", "") + expr = expr.replace("%", "") + expr = expr.replace("³", "") + expr = expr.replace("²", "") + expr = expr.replace("°", "") + expr = expr.replace(" or ", " , ") + expr = expr.replace(" and ", " , ") + + expr = expr.replace("million", "*10^6") + expr = expr.replace("billion", "*10^9") + expr = expr.replace("trillion", "*10^12") + + for unit in [ + "degree", + "cm", + "centimeter", + "meter", + "mile", + "second", + "minute", + "hour", + "day", + "week", + "month", + "year", + "foot", + "feet", + "inch", + "yard", + "liter", + ]: + expr = re.sub(f"{unit}(es)?(s)? *(\^[0-9]+)?", "", expr) + expr = re.sub(f"\^ *\\\\circ", "", expr) + # expr = re.sub(f"\^*\\\\circ", "", expr) + + if len(expr) > 0 and expr[0] == "{" and expr[-1] == "}": + expr = expr[1:-1] + + expr = re.sub(",\\\\! 
*", "", expr) + if _is_float(expr) and _is_int(float(expr)): + expr = str(int(round(float(expr)))) + if "\\" in expr: + try: + expr = _parse_latex(expr) + except: + pass + + # edge case with mixed numbers and negative signs + expr = re.sub("- *", "-", expr) + + expr = _inject_implicit_mixed_number(expr) + # expr = expr.replace(" ", "") + + # # if we somehow still have latex braces here, just drop them + # expr = expr.replace("{", "") + # expr = expr.replace("}", "") + + # don't be case sensitive for text answers + expr = expr.lower() + + if _str_is_int(expr): + expr = str(_str_to_int(expr)) + + return expr + + +def count_unknown_letters_in_expr(expr: str): + expr = expr.replace("sqrt", "") + expr = expr.replace("frac", "") + letters_in_expr = set([x for x in expr if x.isalpha()]) + return len(letters_in_expr) + + +def should_allow_eval(expr: str): + # we don't want to try parsing unknown text or functions of more than two variables + if count_unknown_letters_in_expr(expr) > 2: + return False + + for bad_string in BAD_SUBSTRINGS: + if bad_string in expr: + return False + + for bad_regex in BAD_REGEXES: + if re.search(bad_regex, expr) is not None: + return False + + return True + + +def are_equal_under_sympy(ground_truth_normalized: str, given_normalized: str): + are_equal = False + try: + expr = f"({ground_truth_normalized})-({given_normalized})" + if should_allow_eval(expr): + sympy_diff = _sympy_parse(expr) + simplified = sympy.simplify(sympy_diff) + if simplified == 0: + are_equal = True + except: + pass + return are_equal + + +def split_tuple(expr: str): + """ + Split the elements in a tuple/interval, while handling well-formatted commas in large numbers + """ + expr = _strip_properly_formatted_commas(expr) + if len(expr) == 0: + return [] + if ( + len(expr) > 2 + and expr[0] in TUPLE_CHARS + and expr[-1] in TUPLE_CHARS + and all([ch not in expr[1:-1] for ch in TUPLE_CHARS]) + ): + elems = [elem.strip() for elem in expr[1:-1].split(",")] + else: + elems = [expr] + return elems + + +def grade_answer(given_answer: str, ground_truth: str) -> bool: + """ + The answer will be considered correct if: + (a) it normalizes to the same string as the ground truth answer + OR + (b) sympy can simplify the difference between the expressions to 0 + """ + if given_answer is None: + return False + + ground_truth_normalized_mathd = math_normalize.normalize_answer(ground_truth) + given_answer_normalized_mathd = math_normalize.normalize_answer(given_answer) + + # be at least as lenient as mathd + if ground_truth_normalized_mathd == given_answer_normalized_mathd: + return True + + ground_truth_normalized = _normalize(ground_truth) + given_normalized = _normalize(given_answer) + + if ground_truth_normalized is None: + return False + + if ground_truth_normalized == given_normalized: + return True + + if len(given_normalized) == 0: + return False + + ground_truth_elems = split_tuple(ground_truth_normalized) + given_elems = split_tuple(given_normalized) + + if len(ground_truth_elems) > 1 and ( + ground_truth_normalized[0] != given_normalized[0] + or ground_truth_normalized[-1] != given_normalized[-1] + ): + is_correct = False + elif len(ground_truth_elems) != len(given_elems): + is_correct = False + else: + for ground_truth_elem, given_elem in zip(ground_truth_elems, given_elems): + if _is_frac(ground_truth_elem) and _is_frac(given_elem): + # if fractions aren't reduced, then shouldn't be marked as correct + # so, we don't want to allow sympy.simplify in this case + is_correct = ground_truth_elem == given_elem + 
elif _str_is_int(ground_truth_elem) != _str_is_int(given_elem): + # if the ground truth answer is an integer, we require the given answer to be a strict match (no sympy.simplify) + is_correct = False + else: + is_correct = are_equal_under_sympy(ground_truth_elem, given_elem) + if not is_correct: + break + + return is_correct + + + + + + + +def remove_boxed(s): + left = "\\boxed{" + try: + assert s[:len(left)] == left + assert s[-1] == "}" + return s[len(left):-1] + except: + return None + +def _last_boxed_only_string(string): + idx = string.rfind("\\boxed") + if idx < 0: + idx = string.rfind("\\fbox") + if idx < 0: + return None + + i = idx + left_brace_idx = None + right_brace_idx = None + num_left_braces_open = 0 + while i < len(string): + if string[i] == "{": + num_left_braces_open += 1 + if left_brace_idx is None: + left_brace_idx = i + elif string[i] == "}": + num_left_braces_open -= 1 + if num_left_braces_open == 0: + right_brace_idx = i + break + + i += 1 + + if left_brace_idx is None or right_brace_idx is None: + return None + + return string[left_brace_idx + 1: right_brace_idx].strip() + +def match_answer(response): + is_matched = False + for ans_marker in ['answer:', "answer is", "answers are"]: + ans_idx = response.lower().rfind(ans_marker) + if ans_idx != -1: + is_matched = True + response = response[ans_idx + len(ans_marker):].strip() + if response.endswith("\n"): + response = response[:-2] + + for ans_marker in ["is answer", "is the answer", "are answers", "are the answers"]: + ans_idx = response.lower().rfind(ans_marker) + if ans_idx != -1: + is_matched = True + response = response[:ans_idx].strip() + if response.endswith("\n"): + response = response[:-2] + + # Find boxed + ans_boxed = _last_boxed_only_string(response) + if ans_boxed: + is_matched = True + response = ans_boxed + + if ". " in response: + dot_idx = response.lower().rfind(". ") + if dot_idx != -1: + response = response[:dot_idx].strip() + + for ans_marker in ['be ', "is ", "are ", "=", ": ", "get ", 'be\n', "is\n", "are\n", ":\n", "get\n"]: + ans_idx = response.lower().rfind(ans_marker) + if ans_idx != -1: + is_matched = True + response = response[ans_idx + len(ans_marker):].strip() + if response.endswith("\n"): + response = response[:-2] + + is_matched = is_matched if any([c.isdigit() for c in response]) else False # answer must have a digit + return is_matched, response + +length_units = [ + " m", " cm", " mm", " km", " mi", " yd", " ft", + " nm", " µm" +] + +import math +def evaluate_math(model_output: str, ground_truth: str) -> bool: + model_output = str(model_output) + for unit in length_units: + if unit in model_output: + model_output = model_output.split(unit)[0].strip() + ground_truth = str(ground_truth) + for unit in length_units: + if unit in ground_truth: + ground_truth = ground_truth.split(unit)[0].strip() + + if model_output.lower() == ground_truth.lower(): + return True, model_output + + pattern = r"(\d+)\s*to\s*(\d+)|\[(\d+),\s*(\d+)\]" + import re + match1 = re.match(pattern, model_output) + match2 = re.match(pattern, ground_truth) + if match1 and match2: + return True, model_output + + is_matched, extracted_model_output = match_answer(model_output) + + # grade simple algebra questions. 
if succeed, return; otherwise, proceed to more complex grading + if grade_answer(extracted_model_output, ground_truth): + return True, extracted_model_output + # return True + + try: + if "\pi" in extracted_model_output or "\pi" in ground_truth: + equivs = [] + for pi in [math.pi, 3.14]: + equivs.append(math_equal(extracted_model_output, ground_truth, timeout=True, pi=pi)) + is_correct = any(equivs) + else: + is_correct = math_equal(extracted_model_output, ground_truth, timeout=True) + except: + is_correct = False + + print(f"{extracted_model_output=}\n", f"{model_output=}\n", f"{ground_truth=}\n") + + return is_correct, extracted_model_output \ No newline at end of file diff --git a/math_utils/grader.py b/math_utils/grader.py new file mode 100644 index 0000000000000000000000000000000000000000..c33b578b9f589cd7b6f2ea33e78aa18c092fa376 --- /dev/null +++ b/math_utils/grader.py @@ -0,0 +1,461 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Copyright (c) Microsoft Corporation. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE + +# Copyright (c) 2023 OpenAI +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# Copyright (c) 2021 Dan Hendrycks +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + + +""" +This logic is largely copied from the Hendrycks' MATH release (math_equivalence), and borrowed from: +- https://github.com/microsoft/ToRA/blob/main/src/eval/grader.py +- https://github.com/microsoft/ProphetNet/tree/master/CRITIC +- https://github.com/openai/prm800k +""" + + +import contextlib +import re +import signal +import math +from math import isclose +from typing import Union + +import sympy +from sympy import N, simplify +from sympy.parsing.latex import parse_latex +from sympy.parsing.sympy_parser import parse_expr + + +def is_digit(s): + try: + if "{,}" in str(s): + num = float(str(s).replace("{,}", "")) + return True, num + + num = float(str(s).replace(",", "")) + return True, num + except ValueError: + return False, None + + +def normalize(answer, pi) -> str: + # checking if answer is $ and removing $ in that case to compare + if isinstance(answer, str) and bool(re.match(r'\$\d+(\.\d+)?', answer)): + return answer[1:] + + # checking if answer is % or \\% and removing % + if isinstance(answer, str) and ( + bool(re.match(r'^\d+(\.\d+)?%$', answer)) or bool(re.match(r'^\d+(\.\d+)?\\%$', answer)) + ): + return answer.replace("\\%", "").replace("%", "") + + # handle base + answer = handle_base(answer) + + # handle pi + answer = handle_pi(answer, pi) + + return answer + +def handle_base(x) -> str: + if isinstance(x, str) and "_" in x: + # Due to base + x = x.split("_")[0] + x = float(x) + return int(x) + return x + + +def handle_pi(string, pi): + + if isinstance(string, str) and "\pi" in string: + # Find the first occurrence of "\pi" + idx = string.find("\pi") + + # Iterate over the string and find all occurrences of "\pi" with a valid previous character + while idx != -1: + + if idx > 0 and string[idx-1].isdigit(): + # Replace "\pi" with "*math.pi" if the previous character is a digit + string = string[:idx] + f"*{pi}" + string[idx+3:] + else: + # Replace "\pi" with "1*math.pi" if the previous character is not a digit + string = string[:idx] + f"1*{pi}" + string[idx+3:] + + # Find the next occurrence of "\pi" + idx = string.find("\pi", idx + 1) + + # Evaluate the expression using eval() function + try: + string = 
eval(string) + except: + pass + + return string + +def math_equal( + prediction: Union[bool, float, str], + reference: Union[float, str], + include_percentage: bool = True, + tolerance: float = 1e-4, + timeout: float = 10.0, + pi: float = math.pi +) -> bool: + """ + Exact match of math if and only if: + 1. numerical equal: both can convert to float and are equal + 2. symbolic equal: both can convert to sympy expression and are equal + """ + + prediction = normalize(prediction, pi) + reference = normalize(reference, pi) + + if isinstance(prediction, str) and len(prediction) > 1000: # handling weird corner-cases + prediction = prediction[:1000] + + # 0. string comparison + if isinstance(prediction, str) and isinstance(reference, str): + if prediction.strip().lower() == reference.strip().lower(): + return True + if prediction.replace(" ", "") == reference.replace(" ", ""): + return True + + try: # 1. numerical equal + if is_digit(prediction)[0] and is_digit(reference)[0]: + prediction = is_digit(prediction)[1] + reference = is_digit(reference)[1] + # number questions + if include_percentage: + gt_result = [reference / 100, reference, reference * 100] + else: + gt_result = [reference] + for item in gt_result: + try: + if isclose(item, prediction, rel_tol=tolerance): + return True + except Exception: + continue + return False + except Exception: + pass + + if not prediction and prediction not in [0, False]: + return False + + # 2. symbolic equal + reference = str(reference).strip() + prediction = str(prediction).strip() + + ## deal with [], (), {} + prediction = format_intervals(prediction) + + pred_str, ref_str = prediction, reference + if (prediction.startswith("[") and prediction.endswith("]") and not reference.startswith("(")) or ( + prediction.startswith("(") and prediction.endswith(")") and not reference.startswith("[") + ): + pred_str = pred_str.strip("[]()") + ref_str = ref_str.strip("[]()") + for s in ["{", "}", "(", ")"]: + ref_str = ref_str.replace(s, "") + pred_str = pred_str.replace(s, "") + if pred_str == ref_str: + return True + + ## [a, b] vs. 
[c, d], return a==c and b==d + if ( + prediction + and reference + and prediction[0] in "([" + and prediction[-1] in ")]" + and prediction[0] == reference[0] + and prediction[-1] == reference[-1] + ): + pred_parts = prediction[1:-1].split(",") + ref_parts = reference[1:-1].split(",") + if len(pred_parts) == len(ref_parts): + if all( + [ + math_equal(pred_pt, ref_pt, include_percentage, tolerance) + for pred_pt, ref_pt in zip(pred_parts, ref_parts) + ] + ): + return True + + if "," in prediction and "," in reference: + pred_parts = [item.strip() for item in prediction.split(",")] + ref_parts = [item.strip() for item in reference.split(",")] + + if len(pred_parts) == len(ref_parts): + if all( + [ + math_equal(pred_parts[i], ref_parts[i], include_percentage, tolerance) + for i in range(len(pred_parts)) + ] + ): + return True + else: + return False + + # if we have point == tuple of values + if prediction.startswith("Point") and reference[0] == "(" and reference[-1] == ")": + pred_parts = prediction[prediction.find("(") + 1 : -1].split(",") + ref_parts = reference[1:-1].split(",") + if len(pred_parts) == len(ref_parts): + if all( + [ + math_equal(pred_pt, ref_pt, include_percentage, tolerance) + for pred_pt, ref_pt in zip(pred_parts, ref_parts) + ] + ): + return True + + # if reference is a matrix + if "\begin{pmatrix}" in reference and prediction.startswith("Matrix"): + try: + pred_matrix = parse_expr(prediction) + ref_matrix_items = reference.split()[1:-1:2] + if len(pred_matrix) == len(ref_matrix_items): + if all( + [ + math_equal(pred, ref, include_percentage, tolerance) + for ref, pred in zip(ref_matrix_items, pred_matrix) + ] + ): + return True + except Exception: + pass + elif "\begin{pmatrix}" in reference and prediction.startswith("[") and prediction.endswith("]"): + if isinstance(eval(prediction), list): + try: + pred_matrix = eval(prediction) + # ref_matrix_items = reference.split()[1:-1:2] + ref_matrix_items = reference.lstrip("\\begin{pmatrix}").lstrip("\begin{pmatrix}").rstrip("\\end{pmatrix}").rstrip("\end{pmatrix}") + ref_matrix_items = ref_matrix_items.split("\\") + ref_matrix_items = [row.split("&") if "&" in row else row for row in ref_matrix_items] + if len(pred_matrix) == len(ref_matrix_items): + if all( + [ + math_equal(pred, ref, include_percentage, tolerance) + for ref, pred in zip(ref_matrix_items, pred_matrix) + ] + ): + return True + except Exception: + pass + + return symbolic_equal(prediction, reference, tolerance, timeout) + + +def symbolic_equal(a, b, tolerance, timeout=10.0): + def _parse(s): + for f in [parse_expr, parse_latex]: + try: + with time_limit(timeout): + return f(s) + except Exception: + pass + return s + + a = _parse(a) + b = _parse(b) + + try: + with time_limit(timeout): + if simplify(a - b) == 0: + return True + except Exception: + pass + + try: + with time_limit(timeout): + if isclose(N(a), N(b), rel_tol=tolerance): + return True + except Exception: + pass + return False + + +def extract_answer(string): + """Extract Answer String from \\boxed expression.""" + idx = string.rfind("\\boxed") + if idx < 0: + idx = string.rfind("\\fbox") + if idx < 0: + return None + + i = idx + right_brace_idx = None + num_left_braces_open = 0 + while i < len(string): + if string[i] == "{": + num_left_braces_open += 1 + if string[i] == "}": + num_left_braces_open -= 1 + if num_left_braces_open == 0: + right_brace_idx = i + break + i += 1 + + if right_brace_idx is None: + retval = None + else: + retval = string[idx : right_brace_idx + 1] + + if retval: + left = 
"\\boxed{" + try: + assert retval[: len(left)] == left + assert retval[-1] == "}" + return retval[len(left) : -1] + except AssertionError: + return None + + return None + + +class TimeoutException(Exception): + pass + + +@contextlib.contextmanager +def time_limit(seconds: float): + def signal_handler(signum, frame): + raise TimeoutException("Timed out!") + + signal.setitimer(signal.ITIMER_REAL, seconds) + signal.signal(signal.SIGALRM, signal_handler) + try: + yield + finally: + signal.setitimer(signal.ITIMER_REAL, 0) + + +def format_intervals(prediction): + patterns = { + "Interval(": r"^Interval\((.*)\)$", + "Interval.Ropen(": r"^Interval\.Ropen\((.*)\)$", + "Interval.Lopen(": r"^Interval\.Lopen\((.*)\)$", + "Interval.open(": r"^Interval\.open\((.*)\)$", + } + + for key, pattern in patterns.items(): + match = re.match(pattern, prediction) + if match: + inner_content = match.group(1) + + if key == "Interval(": # Intarval(a, b) == [a, b] + return f"[{inner_content}]" + elif key == "Interval.Ropen(": # Intarval.Ropen(a, b) == [a, b) + return f"[{inner_content})" + elif key == "Interval.Lopen(": # Intarval.Lopen(a, b) == (a, b] + return f"({inner_content}]" + elif key == "Interval.open(": # Intarval.open(a, b) == (a, b) + return f"({inner_content})" + + return prediction + + +def _test_math_equal(): + ref = "6,-2" + pred = "6" + print(math_equal(ref, pred)) + +def _test_math_equal(): + pi = math.pi + ref = "900\pi" + pred = 812.0 + print(math_equal(pred, ref, pi=pi)) + + ref = "25\pi" + pred = 78.5 + print(math_equal(pred, ref, pi=pi)) + + ref = "90\pi" + pred = 282.6 + print(math_equal(pred, ref, pi=pi)) + + ref = "24+4\pi" + pred = 36.57142857142857 + print(math_equal(pred, ref, pi=pi)) + + ref = "9\pi" + pred = 28.274309999999993 + print(math_equal(pred, ref, pi=pi)) + + +def _test_math_equal(): + ref = "\\begin{pmatrix}0&1\\1&0\end{pmatrix}" + # ref=ref.split()[1:-1:2] + pred = [[0,1], [1,0]] + print(math_equal(pred, ref)) + +if __name__ == "__main__": + _test_math_equal() diff --git a/math_utils/math_normalize.py b/math_utils/math_normalize.py new file mode 100644 index 0000000000000000000000000000000000000000..6d30af347c60b48b9fef50c81cc77cbc31f337f0 --- /dev/null +++ b/math_utils/math_normalize.py @@ -0,0 +1,163 @@ +""" +This logic is largely copied from the Hendrycks' MATH release (math_equivalence). + +From: https://github.com/openai/prm800k/blob/main/prm800k/grading/math_normalize.py +""" +import re +from typing import Optional + + +def normalize_answer(answer: Optional[str]) -> Optional[str]: + if answer is None: + return None + answer = answer.strip() + try: + # Remove enclosing `\text{}`. 
+ m = re.search("^\\\\text\{(?P.+?)\}$", answer) + if m is not None: + answer = m.group("text").strip() + return _strip_string(answer) + except: + return answer + + +def _fix_fracs(string): + substrs = string.split("\\frac") + new_str = substrs[0] + if len(substrs) > 1: + substrs = substrs[1:] + for substr in substrs: + new_str += "\\frac" + if substr[0] == "{": + new_str += substr + else: + try: + assert len(substr) >= 2 + except: + return string + a = substr[0] + b = substr[1] + if b != "{": + if len(substr) > 2: + post_substr = substr[2:] + new_str += "{" + a + "}{" + b + "}" + post_substr + else: + new_str += "{" + a + "}{" + b + "}" + else: + if len(substr) > 2: + post_substr = substr[2:] + new_str += "{" + a + "}" + b + post_substr + else: + new_str += "{" + a + "}" + b + string = new_str + return string + + +def _fix_a_slash_b(string): + if len(string.split("/")) != 2: + return string + a = string.split("/")[0] + b = string.split("/")[1] + try: + a = int(a) + b = int(b) + assert string == "{}/{}".format(a, b) + new_string = "\\frac{" + str(a) + "}{" + str(b) + "}" + return new_string + except: + return string + + +def _remove_right_units(string): + # "\\text{ " only ever occurs (at least in the val set) when describing units + if "\\text{ " in string: + splits = string.split("\\text{ ") + assert len(splits) == 2 + return splits[0] + else: + return string + + +def _fix_sqrt(string): + if "\\sqrt" not in string: + return string + splits = string.split("\\sqrt") + new_string = splits[0] + for split in splits[1:]: + if split[0] != "{": + a = split[0] + new_substr = "\\sqrt{" + a + "}" + split[1:] + else: + new_substr = "\\sqrt" + split + new_string += new_substr + return new_string + + +def _strip_string(string): + # linebreaks + string = string.replace("\n", "") + # print(string) + + # remove inverse spaces + string = string.replace("\\!", "") + # print(string) + + # replace \\ with \ + string = string.replace("\\\\", "\\") + # print(string) + + # replace tfrac and dfrac with frac + string = string.replace("tfrac", "frac") + string = string.replace("dfrac", "frac") + # print(string) + + # remove \left and \right + string = string.replace("\\left", "") + string = string.replace("\\right", "") + # print(string) + + # Remove circ (degrees) + string = string.replace("^{\\circ}", "") + string = string.replace("^\\circ", "") + + # remove dollar signs + string = string.replace("\\$", "") + + # remove units (on the right) + string = _remove_right_units(string) + + # remove percentage + string = string.replace("\\%", "") + string = string.replace("\%", "") + + # " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." is the start of the string + string = string.replace(" .", " 0.") + string = string.replace("{.", "{0.") + # if empty, return empty string + if len(string) == 0: + return string + if string[0] == ".": + string = "0" + string + + # to consider: get rid of e.g. "k = " or "q = " at beginning + if len(string.split("=")) == 2: + if len(string.split("=")[0]) <= 2: + string = string.split("=")[1] + + # fix sqrt3 --> sqrt{3} + string = _fix_sqrt(string) + + # remove spaces + string = string.replace(" ", "") + + # \frac1b or \frac12 --> \frac{1}{b} and \frac{1}{2}, etc. Even works with \frac1{72} (but not \frac{72}1). 
Also does a/b --> \\frac{a}{b} + string = _fix_fracs(string) + + # manually change 0.5 --> \frac{1}{2} + if string == "0.5": + string = "\\frac{1}{2}" + + # NOTE: X/Y changed to \frac{X}{Y} in dataset, but in simple cases fix in case the model output is X/Y + string = _fix_a_slash_b(string) + + return string \ No newline at end of file diff --git a/math_utils/test.py b/math_utils/test.py new file mode 100644 index 0000000000000000000000000000000000000000..8b5aea6a8343ce0c8f7395d9eedc2e06351dfc81 --- /dev/null +++ b/math_utils/test.py @@ -0,0 +1,428 @@ +""" +Answer checker API that uses sympy to simplify expressions and check for equality. + +Call grade_answer(given_answer: str, ground_truth: str). + +FROM: https://github.com/openai/prm800k/blob/main/prm800k/grading/grader.py +""" +import re +import sympy +from pylatexenc import latex2text +from sympy.parsing import sympy_parser +import sys +sys.path.append('/data/xuqixin/tablefactory/verl/utils/reward_score/') +from math_utils import math_normalize +from math_utils.grader import math_equal +# import math_normalize +# from grader import math_equal + +# sympy might hang -- we don't care about trying to be lenient in these cases +BAD_SUBSTRINGS = ["^{", "^("] +BAD_REGEXES = ["\^[0-9]+\^", "\^[0-9][0-9]+"] +TUPLE_CHARS = "()[]" + + +def _sympy_parse(expr: str): + """Parses an expression with sympy.""" + py_expr = expr.replace("^", "**") + return sympy_parser.parse_expr( + py_expr, + transformations=( + sympy_parser.standard_transformations + + (sympy_parser.implicit_multiplication_application,) + ), + ) + + +def _parse_latex(expr: str) -> str: + """Attempts to parse latex to an expression sympy can read.""" + expr = expr.replace("\\tfrac", "\\frac") + expr = expr.replace("\\dfrac", "\\frac") + expr = expr.replace("\\frac", " \\frac") # Play nice with mixed numbers. + expr = latex2text.LatexNodes2Text().latex_to_text(expr) + + # Replace the specific characters that this parser uses. + expr = expr.replace("√", "sqrt") + expr = expr.replace("π", "pi") + expr = expr.replace("∞", "inf") + expr = expr.replace("∪", "U") + expr = expr.replace("·", "*") + expr = expr.replace("×", "*") + + return expr.strip() + + +def _is_float(num: str) -> bool: + try: + float(num) + return True + except ValueError: + return False + + +def _is_int(x: float) -> bool: + try: + return abs(x - int(round(x))) <= 1e-7 + except: + return False + + +def _is_frac(expr: str) -> bool: + return bool(re.search(r"^-?[0-9]+.?/0*[1-9][0-9]*.?$", expr)) + + +def _str_is_int(x: str) -> bool: + try: + x = _strip_properly_formatted_commas(x) + x = float(x) + return abs(x - int(round(x))) <= 1e-7 + except: + return False + + +def _str_to_int(x: str) -> bool: + x = x.replace(",", "") + x = float(x) + return int(x) + + +def _inject_implicit_mixed_number(step: str): + """ + Automatically make a mixed number evalable + e.g. 7 3/4 => 7+3/4 + """ + p1 = re.compile("([0-9]) +([0-9])") + step = p1.sub("\\1+\\2", step) ## implicit mults + return step + + +def _strip_properly_formatted_commas(expr: str): + # We want to be careful because we don't want to strip tuple commas + p1 = re.compile("(\d)(,)(\d\d\d)($|\D)") + while True: + next_expr = p1.sub("\\1\\3\\4", expr) + if next_expr == expr: + break + expr = next_expr + return next_expr + + +def _normalize(expr: str) -> str: + """Normalize answer expressions.""" + if expr is None: + return None + + # Remove enclosing `\text{}`. 
+ m = re.search("^\\\\text\{(?P.+?)\}$", expr) + if m is not None: + expr = m.group("text") + + expr = expr.replace("\\%", "%") + expr = expr.replace("\\$", "$") + expr = expr.replace("$", "") + expr = expr.replace("%", "") + expr = expr.replace("³", "") + expr = expr.replace("²", "") + expr = expr.replace("°", "") + expr = expr.replace(" or ", " , ") + expr = expr.replace(" and ", " , ") + + expr = expr.replace("million", "*10^6") + expr = expr.replace("billion", "*10^9") + expr = expr.replace("trillion", "*10^12") + + for unit in [ + "degree", + "cm", + "centimeter", + "meter", + "mile", + "second", + "minute", + "hour", + "day", + "week", + "month", + "year", + "foot", + "feet", + "inch", + "yard", + "liter", + ]: + expr = re.sub(f"{unit}(es)?(s)? *(\^[0-9]+)?", "", expr) + expr = re.sub(f"\^ *\\\\circ", "", expr) + # expr = re.sub(f"\^*\\\\circ", "", expr) + + if len(expr) > 0 and expr[0] == "{" and expr[-1] == "}": + expr = expr[1:-1] + + expr = re.sub(",\\\\! *", "", expr) + if _is_float(expr) and _is_int(float(expr)): + expr = str(int(round(float(expr)))) + if "\\" in expr: + try: + expr = _parse_latex(expr) + except: + pass + + # edge case with mixed numbers and negative signs + expr = re.sub("- *", "-", expr) + + expr = _inject_implicit_mixed_number(expr) + # expr = expr.replace(" ", "") + + # # if we somehow still have latex braces here, just drop them + # expr = expr.replace("{", "") + # expr = expr.replace("}", "") + + # don't be case sensitive for text answers + expr = expr.lower() + + if _str_is_int(expr): + expr = str(_str_to_int(expr)) + + return expr + + +def count_unknown_letters_in_expr(expr: str): + expr = expr.replace("sqrt", "") + expr = expr.replace("frac", "") + letters_in_expr = set([x for x in expr if x.isalpha()]) + return len(letters_in_expr) + + +def should_allow_eval(expr: str): + # we don't want to try parsing unknown text or functions of more than two variables + if count_unknown_letters_in_expr(expr) > 2: + return False + + for bad_string in BAD_SUBSTRINGS: + if bad_string in expr: + return False + + for bad_regex in BAD_REGEXES: + if re.search(bad_regex, expr) is not None: + return False + + return True + + +def are_equal_under_sympy(ground_truth_normalized: str, given_normalized: str): + are_equal = False + try: + expr = f"({ground_truth_normalized})-({given_normalized})" + if should_allow_eval(expr): + sympy_diff = _sympy_parse(expr) + simplified = sympy.simplify(sympy_diff) + if simplified == 0: + are_equal = True + except: + pass + return are_equal + + +def split_tuple(expr: str): + """ + Split the elements in a tuple/interval, while handling well-formatted commas in large numbers + """ + expr = _strip_properly_formatted_commas(expr) + if len(expr) == 0: + return [] + if ( + len(expr) > 2 + and expr[0] in TUPLE_CHARS + and expr[-1] in TUPLE_CHARS + and all([ch not in expr[1:-1] for ch in TUPLE_CHARS]) + ): + elems = [elem.strip() for elem in expr[1:-1].split(",")] + else: + elems = [expr] + return elems + + +def grade_answer(given_answer: str, ground_truth: str) -> bool: + """ + The answer will be considered correct if: + (a) it normalizes to the same string as the ground truth answer + OR + (b) sympy can simplify the difference between the expressions to 0 + """ + if given_answer is None: + return False + + ground_truth_normalized_mathd = math_normalize.normalize_answer(ground_truth) + given_answer_normalized_mathd = math_normalize.normalize_answer(given_answer) + + # be at least as lenient as mathd + if ground_truth_normalized_mathd == 
given_answer_normalized_mathd: + return True + + ground_truth_normalized = _normalize(ground_truth) + given_normalized = _normalize(given_answer) + + if ground_truth_normalized is None: + return False + + if ground_truth_normalized == given_normalized: + return True + + if len(given_normalized) == 0: + return False + + ground_truth_elems = split_tuple(ground_truth_normalized) + given_elems = split_tuple(given_normalized) + + if len(ground_truth_elems) > 1 and ( + ground_truth_normalized[0] != given_normalized[0] + or ground_truth_normalized[-1] != given_normalized[-1] + ): + is_correct = False + elif len(ground_truth_elems) != len(given_elems): + is_correct = False + else: + for ground_truth_elem, given_elem in zip(ground_truth_elems, given_elems): + if _is_frac(ground_truth_elem) and _is_frac(given_elem): + # if fractions aren't reduced, then shouldn't be marked as correct + # so, we don't want to allow sympy.simplify in this case + is_correct = ground_truth_elem == given_elem + elif _str_is_int(ground_truth_elem) != _str_is_int(given_elem): + # if the ground truth answer is an integer, we require the given answer to be a strict match (no sympy.simplify) + is_correct = False + else: + is_correct = are_equal_under_sympy(ground_truth_elem, given_elem) + if not is_correct: + break + + return is_correct + + + + + + + +def remove_boxed(s): + left = "\\boxed{" + try: + assert s[:len(left)] == left + assert s[-1] == "}" + return s[len(left):-1] + except: + return None + +def _last_boxed_only_string(string): + idx = string.rfind("\\boxed") + if idx < 0: + idx = string.rfind("\\fbox") + if idx < 0: + return None + + i = idx + left_brace_idx = None + right_brace_idx = None + num_left_braces_open = 0 + while i < len(string): + if string[i] == "{": + num_left_braces_open += 1 + if left_brace_idx is None: + left_brace_idx = i + elif string[i] == "}": + num_left_braces_open -= 1 + if num_left_braces_open == 0: + right_brace_idx = i + break + + i += 1 + + if left_brace_idx is None or right_brace_idx is None: + return None + + return string[left_brace_idx + 1: right_brace_idx].strip() + +def match_answer(response): + is_matched = False + for ans_marker in ['answer:', "answer is", "answers are"]: + ans_idx = response.lower().rfind(ans_marker) + if ans_idx != -1: + is_matched = True + response = response[ans_idx + len(ans_marker):].strip() + if response.endswith("\n"): + response = response[:-2] + + for ans_marker in ["is answer", "is the answer", "are answers", "are the answers"]: + ans_idx = response.lower().rfind(ans_marker) + if ans_idx != -1: + is_matched = True + response = response[:ans_idx].strip() + if response.endswith("\n"): + response = response[:-2] + + # Find boxed + ans_boxed = _last_boxed_only_string(response) + if ans_boxed: + is_matched = True + response = ans_boxed + + if ". " in response: + dot_idx = response.lower().rfind(". 
") + if dot_idx != -1: + response = response[:dot_idx].strip() + + for ans_marker in ['be ', "is ", "are ", "=", ": ", "get ", 'be\n', "is\n", "are\n", ":\n", "get\n"]: + ans_idx = response.lower().rfind(ans_marker) + if ans_idx != -1: + is_matched = True + response = response[ans_idx + len(ans_marker):].strip() + if response.endswith("\n"): + response = response[:-2] + + is_matched = is_matched if any([c.isdigit() for c in response]) else False # answer must have a digit + return is_matched, response + +length_units = [ + " m", " cm", " mm", " km", " mi", " yd", " ft", + " nm", " µm" +] + +import math +def evaluate_math(model_output: str, ground_truth: str) -> bool: + model_output = str(model_output) + for unit in length_units: + if unit in model_output: + model_output = model_output.split(unit)[0].strip() + ground_truth = str(ground_truth) + for unit in length_units: + if unit in ground_truth: + ground_truth = ground_truth.split(unit)[0].strip() + + if model_output == "False" and ground_truth == "No": + return True, model_output + if model_output.lower() == ground_truth.lower(): + return True, model_output + + is_matched, extracted_model_output = match_answer(model_output) + + # grade simple algebra questions. if succeed, return; otherwise, proceed to more complex grading + if grade_answer(extracted_model_output, ground_truth): + return True, extracted_model_output + # return True + + try: + if "\pi" in extracted_model_output or "\pi" in ground_truth: + equivs = [] + for pi in [math.pi, 3.14]: + equivs.append(math_equal(extracted_model_output, ground_truth, timeout=True, pi=pi)) + is_correct = any(equivs) + else: + is_correct = math_equal(extracted_model_output, ground_truth, timeout=True) + except: + is_correct = False + + # print(f"{extracted_model_output=}\n", f"{model_output=}\n", f"{ground_truth=}\n") + + return is_correct, extracted_model_output + +is_corr= evaluate_math(model_output="45", ground_truth="45°") + +print(is_corr) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..fff1f5cb596a242908d665409d60f8ac53424bc5 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,31 @@ +decord; platform_machine != 'arm64' +eva-decord; platform_machine == 'arm64' +gradio +huggingface_hub +imageio +matplotlib +numpy +omegaconf +openai +opencv-python>=4.4.0.46 +openpyxl +pandas +pillow +portalocker +protobuf +python-dotenv +requests +rich +sentencepiece +setuptools +sty +tabulate +tiktoken +timeout-decorator +torch +torchvision +tqdm +transformers +typing_extensions +validators +xlsxwriter diff --git a/requirements/docs.txt b/requirements/docs.txt new file mode 100644 index 0000000000000000000000000000000000000000..02587e64881bbf53f7e29ee780a75a728cc8ac2f --- /dev/null +++ b/requirements/docs.txt @@ -0,0 +1,11 @@ +docutils==0.18.1 +modelindex +myst-parser +-e git+https://github.com/open-compass/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme +sphinx==6.1.3 +sphinx-copybutton +sphinx-design +sphinx-notfound-page +sphinx-tabs +sphinxcontrib-jquery +tabulate diff --git a/run.py b/run.py new file mode 100644 index 0000000000000000000000000000000000000000..601a10c552ebf9da8780566f6c19f0911b8181fe --- /dev/null +++ b/run.py @@ -0,0 +1,442 @@ +import json + +import torch +import torch.distributed as dist + +from vlmeval.config import supported_VLM +from vlmeval.dataset.video_dataset_config import supported_video_datasets +from vlmeval.dataset import build_dataset +from vlmeval.inference import infer_data_job +from vlmeval.inference_video 
import infer_data_job_video +from vlmeval.inference_mt import infer_data_job_mt +from vlmeval.smp import * +from vlmeval.utils.result_transfer import MMMU_result_transfer, MMTBench_result_transfer + +def build_model_from_config(cfg, model_name): + import vlmeval.api + import vlmeval.vlm + config = cp.deepcopy(cfg[model_name]) + if config == {}: + return supported_VLM[model_name]() + assert 'class' in config + cls_name = config.pop('class') + if hasattr(vlmeval.api, cls_name): + return getattr(vlmeval.api, cls_name)(**config) + elif hasattr(vlmeval.vlm, cls_name): + return getattr(vlmeval.vlm, cls_name)(**config) + else: + raise ValueError(f'Class {cls_name} is not supported in `vlmeval.api` or `vlmeval.vlm`') + + +def build_dataset_from_config(cfg, dataset_name): + import vlmeval.dataset + import inspect + config = cp.deepcopy(cfg[dataset_name]) + if config == {}: + return supported_video_datasets[dataset_name]() + assert 'class' in config + cls_name = config.pop('class') + if hasattr(vlmeval.dataset, cls_name): + cls = getattr(vlmeval.dataset, cls_name) + sig = inspect.signature(cls.__init__) + valid_params = {k: v for k, v in config.items() if k in sig.parameters} + if cls.MODALITY == 'VIDEO': + if valid_params.get('fps', 0) > 0 and valid_params.get('nframe', 0) > 0: + raise ValueError('fps and nframe should not be set at the same time') + if valid_params.get('fps', 0) <= 0 and valid_params.get('nframe', 0) <= 0: + raise ValueError('fps and nframe should be set at least one valid value') + return cls(**valid_params) + else: + raise ValueError(f'Class {cls_name} is not supported in `vlmeval.dataset`') + + + +def parse_args(): + help_msg = """\ +You can launch the evaluation by setting either --data and --model or --config. + +--data and --model: + Each Arg should be a list of strings, specifying the names of datasets and models. + To find all supported model names, please refer to the `vlmeval/config.py` of check the output of the command \ + `vlmutil mlist all` in the terminal (you should first have vlmeval installed). + To find all supported dataset names, please refer to the `vlmeval/dataset/__init__.py` file. The python script \ + to print all supported dataset names is as follows: + ```python + from vlmeval.dataset import SUPPORTED_DATASETS + print(SUPPORTED_DATASETS) + ``` + or you can check the output of the command `vlmutil dlist all` in the terminal. + To find all supported video dataset default settings, please refer to the \ + `vlmeval/dataset/video_dataset_config.py` file. + +--config: + Launch the evaluation by specifying the path to the config json file. Sample Json Content: + ```json + { + "model": { + "GPT4o_20240806_T00_HIGH": { + "class": "GPT4V", + "model": "gpt-4o-2024-08-06", + "temperature": 0, + "img_detail": "high" + }, + "GPT4o_20240806_T10_Low": { + "class": "GPT4V", + "model": "gpt-4o-2024-08-06", + "temperature": 1.0, + "img_detail": "low" + }, + "GPT4o_20241120": {} + }, + "data": { + "MME-RealWorld-Lite": { + "class": "MMERealWorld", + "dataset": "MME-RealWorld-Lite" + }, + "MMBench_DEV_EN_V11": { + "class": "ImageMCQDataset", + "dataset": "MMBench_DEV_EN_V11" + }, + "MMBench_Video_8frame_nopack": {}, + "Video-MME_16frame_subs": { + "class": "VideoMME", + "dataset": "Video-MME", + "nframe": 16, + "use_subtitle": true, + } + } + } + ``` + Currently, only `model` and `data` are supported fields. The content of each field is a dictionary. 
+ For `model`, the key is the name of the model, and the value is a dictionary containing the following keys: + - `class`: The class name of the model, which should be a class in `vlmeval.vlm` or `vlmeval.api`. + - Other keys are specific to the model, please refer to the corresponding class. + - Tip: The defined model in the `supported_VLM` of `vlmeval/config.py` can be used as a shortcut. + For `data`, the key is the name of the dataset (should be the same as the `dataset` field in most cases, \ + except for video datasets), and the value is a dictionary containing the following keys: + - `class`: The class name of the dataset, which should be a class in `vlmeval.dataset`. + - `dataset`: The name of the dataset, which should be a string that is accepted by the `dataset` argument of the \ + corresponding class. + - Other keys are specific to the dataset, please refer to the corresponding class. + - Tip: The defined dataset in the `supported_video_datasets` of `vlmeval/dataset/video_dataset_config.py` \ + can be used as a shortcut. + + The keys in the `model` and `data` fields will be used for naming the prediction files and evaluation results. + When launching with `--config`, args for API VLMs, such as `--retry`, `--verbose`, will be ignored. +""" + parser = argparse.ArgumentParser(description=help_msg, formatter_class=argparse.RawTextHelpFormatter) + # Essential Args, Setting the Names of Datasets and Models + parser.add_argument('--data', type=str, nargs='+', help='Names of Datasets') + parser.add_argument('--model', type=str, nargs='+', help='Names of Models') + parser.add_argument('--config', type=str, help='Path to the Config Json File') + # Work Dir + parser.add_argument('--work-dir', type=str, default='./outputs', help='select the output directory') + # Infer + Eval or Infer Only + parser.add_argument('--mode', type=str, default='all', choices=['all', 'infer']) + # API Kwargs, Apply to API VLMs and Judge API LLMs + parser.add_argument('--api-nproc', type=int, default=16, help='Parallel API calling') + parser.add_argument('--retry', type=int, default=10, help='retry numbers for API VLMs') + parser.add_argument('--judge-args', type=str, default=None, help='Judge arguments in JSON format') + # Explicitly Set the Judge Model + parser.add_argument('--judge', type=str, default=None) + # Logging Utils + parser.add_argument('--verbose', action='store_true') + # Configuration for Resume + # Ignore: will not rerun failed VLM inference + parser.add_argument('--ignore', action='store_true', help='Ignore failed indices. 
') + # Reuse: will reuse the existing prediction files + parser.add_argument('--reuse', action='store_true') + # Reuse-aux: if set, when reuse is True, will also reuse the auxiliary evaluation files + parser.add_argument('--reuse-aux', type=bool, default=True, help='reuse auxiliary evaluation files') + + args = parser.parse_args() + return args + + +def main(): + logger = get_logger('RUN') + rank, world_size = get_rank_and_world_size() + args = parse_args() + use_config, cfg = False, None + if args.config is not None: + assert args.data is None and args.model is None, '--data and --model should not be set when using --config' + use_config, cfg = True, load(args.config) + args.model = list(cfg['model'].keys()) + args.data = list(cfg['data'].keys()) + else: + assert len(args.data), '--data should be a list of data files' + + if rank == 0: + if not args.reuse: + logger.warning('--reuse is not set, will not reuse previous (before one day) temporary files') + else: + logger.warning('--reuse is set, will reuse the latest prediction & temporary pickle files') + + if 'MMEVAL_ROOT' in os.environ: + args.work_dir = os.environ['MMEVAL_ROOT'] + + if not use_config: + for k, v in supported_VLM.items(): + if hasattr(v, 'keywords') and 'retry' in v.keywords and args.retry is not None: + v.keywords['retry'] = args.retry + supported_VLM[k] = v + if hasattr(v, 'keywords') and 'verbose' in v.keywords and args.verbose is not None: + v.keywords['verbose'] = args.verbose + supported_VLM[k] = v + + if world_size > 1: + local_rank = os.environ.get('LOCAL_RANK', 0) + torch.cuda.set_device(int(local_rank)) + dist.init_process_group( + backend='nccl', + timeout=datetime.timedelta(seconds=int(os.environ.get('DIST_TIMEOUT', 7200))) + ) + + for _, model_name in enumerate(args.model): + model = None + date, commit_id = timestr('day'), githash(digits=8) + eval_id = f"T{date}_G{commit_id}" + + pred_root = osp.join(args.work_dir, model_name, eval_id) + pred_root_meta = osp.join(args.work_dir, model_name) + os.makedirs(pred_root_meta, exist_ok=True) + + prev_pred_roots = ls(osp.join(args.work_dir, model_name), mode='dir') + if len(prev_pred_roots) and args.reuse: + prev_pred_roots.sort() + + if not osp.exists(pred_root): + os.makedirs(pred_root, exist_ok=True) + + if use_config: + model = build_model_from_config(cfg['model'], model_name) + + for _, dataset_name in enumerate(args.data): + if world_size > 1: + dist.barrier() + + try: + result_file_base = f'{model_name}_{dataset_name}.xlsx' + + if use_config: + if world_size > 1: + if rank == 0: + dataset = build_dataset_from_config(cfg['data'], dataset_name) + dist.barrier() + dataset = build_dataset_from_config(cfg['data'], dataset_name) + if dataset is None: + logger.error(f'Dataset {dataset_name} is not valid, will be skipped. ') + continue + else: + dataset_kwargs = {} + if dataset_name in ['MMLongBench_DOC', 'DUDE', 'DUDE_MINI', 'SLIDEVQA', 'SLIDEVQA_MINI']: + dataset_kwargs['model'] = model_name + + # If distributed, first build the dataset on the main process for doing preparation works + if world_size > 1: + if rank == 0: + dataset = build_dataset(dataset_name, **dataset_kwargs) + dist.barrier() + + dataset = build_dataset(dataset_name, **dataset_kwargs) + if dataset is None: + logger.error(f'Dataset {dataset_name} is not valid, will be skipped. 
') + continue + + # Handling Multi-Turn Dataset + if dataset.TYPE == 'MT': + result_file_base = result_file_base.replace('.xlsx', '.tsv') + + result_file = osp.join(pred_root, result_file_base) + + # Reuse the previous prediction file if exists + if rank == 0 and len(prev_pred_roots): + prev_result_files = [] + prev_pkl_file_list = [] + for root in prev_pred_roots[::-1]: + if osp.exists(osp.join(root, result_file_base)): + if args.reuse_aux: + prev_result_files = fetch_aux_files(osp.join(root, result_file_base)) + else: + prev_result_files = [osp.join(root, result_file_base)] + break + elif commit_id in root and len(ls(root)) and root != pred_root: + temp_files = ls(root, match=[dataset_name, '.pkl']) + if len(temp_files): + prev_pkl_file_list.extend(temp_files) + break + if not args.reuse: + prev_result_files = [] + prev_pkl_file_list = [] + if len(prev_result_files): + for prev_result_file in prev_result_files: + src = prev_result_file + tgt = osp.join(pred_root, osp.basename(src)) + if not osp.exists(tgt): + shutil.copy(src, tgt) + logger.info(f'--reuse is set, will reuse the prediction file {src}.') + else: + logger.warning(f'File already exists: {tgt}') + + elif len(prev_pkl_file_list): + for fname in prev_pkl_file_list: + target_path = osp.join(pred_root, osp.basename(fname)) + if not osp.exists(target_path): + shutil.copy(fname, target_path) + logger.info(f'--reuse is set, will reuse the prediction pickle file {fname}.') + else: + logger.warning(f'File already exists: {target_path}') + + if world_size > 1: + dist.barrier() + + if model is None: + model = model_name # which is only a name + + # Perform the Inference + if dataset.MODALITY == 'VIDEO': + model = infer_data_job_video( + model, + work_dir=pred_root, + model_name=model_name, + dataset=dataset, + result_file_name=result_file_base, + verbose=args.verbose, + api_nproc=args.api_nproc) + elif dataset.TYPE == 'MT': + model = infer_data_job_mt( + model, + work_dir=pred_root, + model_name=model_name, + dataset=dataset, + verbose=args.verbose, + api_nproc=args.api_nproc, + ignore_failed=args.ignore) + else: + model = infer_data_job( + model, + work_dir=pred_root, + model_name=model_name, + dataset=dataset, + verbose=args.verbose, + api_nproc=args.api_nproc, + ignore_failed=args.ignore) + + # Set the judge kwargs first before evaluation or dumping + + judge_kwargs = { + 'nproc': args.api_nproc, + 'verbose': args.verbose, + 'retry': args.retry if args.retry is not None else 3, + **(json.loads(args.judge_args) if args.judge_args else {}), + } + + if args.retry is not None: + judge_kwargs['retry'] = args.retry + if args.judge is not None: + judge_kwargs['model'] = args.judge + else: + if dataset.TYPE in ['MCQ', 'Y/N', 'MCQ_MMMU_Pro'] or listinstr(['moviechat1k'], dataset_name.lower()): + if listinstr(['WeMath'], dataset_name): + judge_kwargs['model'] = 'gpt-4o-mini' + else: + judge_kwargs['model'] = 'chatgpt-0125' + elif listinstr(['MMVet', 'LLaVABench', 'MMBench_Video'], dataset_name): + judge_kwargs['model'] = 'gpt-4-turbo' + elif listinstr(['MathVista', 'MathVerse', 'MathVision', 'DynaMath', 'VL-RewardBench', 'LogicVista', 'MOAT'], dataset_name): # noqa: E501 + judge_kwargs['model'] = 'gpt-4o-mini' + elif listinstr(['MMLongBench', 'MMDU', 'DUDE', 'SLIDEVQA', 'MIA-Bench', 'WildVision', 'MMAlignBench'], dataset_name): # noqa: E501 + judge_kwargs['model'] = 'gpt-4o' + + if rank == 0: + logger.info(judge_kwargs) + + if world_size > 1: + dist.barrier() + + # Only Rank 0 handles the evaluation part + if rank == 0: + # Prepare Submission 
Files for MMMU_TEST AND MMT-Bench_ALL + if dataset_name in ['MMMU_TEST']: + result_json = MMMU_result_transfer(result_file) + logger.info(f'Transfer MMMU_TEST result to json for official evaluation, ' + f'json file saved in {result_json}') + continue + elif 'MMT-Bench_ALL' in dataset_name: + submission_file = MMTBench_result_transfer(result_file, **judge_kwargs) + logger.info(f'Extract options from prediction of MMT-Bench FULL split for official evaluation ' + f'(https://eval.ai/web/challenges/challenge-page/2328/overview), ' + f'submission file saved in {submission_file}') + continue + + # Skip the evaluation part if only infer + if args.mode == 'infer': + continue + + # Skip the evaluation part if the dataset evaluation is not supported or annotations are missing + if 'MLLMGuard_DS' in dataset_name: + logger.info('The evaluation of MLLMGuard_DS is not supported yet. ') + continue + elif 'AesBench_TEST' == dataset_name: + logger.info(f'The results are saved in {result_file}. ' + f'Please send it to the AesBench Team via huangyipo@hotmail.com.') + continue + elif dataset_name in ['DocVQA_TEST', 'InfoVQA_TEST', 'Q-Bench1_TEST', 'A-Bench_TEST']: + logger.info(f'{dataset_name} is a test split without ground-truth. ' + 'Thus only the inference part is supported for those datasets. ') + continue + elif dataset_name in [ + 'MMBench_TEST_CN', 'MMBench_TEST_EN', 'MMBench', 'MMBench_CN', + 'MMBench_TEST_CN_V11', 'MMBench_TEST_EN_V11', 'MMBench_V11', 'MMBench_CN_V11' + ] and not MMBenchOfficialServer(dataset_name): + logger.error( + f'Can not evaluate {dataset_name} on non-official servers, will skip the evaluation.') + continue + + # Setup the proxy for the evaluation + eval_proxy = os.environ.get('EVAL_PROXY', None) + old_proxy = os.environ.get('HTTP_PROXY', '') + if eval_proxy is not None: + proxy_set(eval_proxy) + + # Perform the Evaluation + eval_results = dataset.evaluate(result_file, **judge_kwargs) + # Display Evaluation Results in Terminal + if eval_results is not None: + assert isinstance(eval_results, dict) or isinstance(eval_results, pd.DataFrame) + logger.info(f'The evaluation of model {model_name} x dataset {dataset_name} has finished! 
') + logger.info('Evaluation Results:') + if isinstance(eval_results, dict): + logger.info('\n' + json.dumps(eval_results, indent=4)) + elif isinstance(eval_results, pd.DataFrame): + if len(eval_results) < len(eval_results.columns): + eval_results = eval_results.T + logger.info('\n' + tabulate(eval_results)) + + # Restore the proxy + if eval_proxy is not None: + proxy_set(old_proxy) + + # Create the symbolic links for the prediction files + files = os.listdir(pred_root) + files = [x for x in files if (f'{model_name}_{dataset_name}' in x or "status.json" in x)] + for f in files: + cwd = os.getcwd() + file_addr = osp.join(cwd, pred_root, f) + link_addr = osp.join(cwd, pred_root_meta, f) + if osp.exists(link_addr) or osp.islink(link_addr): + os.remove(link_addr) + os.symlink(file_addr, link_addr) + + except Exception as e: + logger.exception(f'Model {model_name} x Dataset {dataset_name} combination failed: {e}, ' + 'skipping this combination.') + continue + + if world_size > 1: + dist.destroy_process_group() + + +if __name__ == '__main__': + load_env() + main() diff --git a/run_for_bash.py b/run_for_bash.py new file mode 100644 index 0000000000000000000000000000000000000000..dfe02e34d97361d04a8f28a935d5006fc4dd980a --- /dev/null +++ b/run_for_bash.py @@ -0,0 +1,409 @@ +import json + +import torch +import torch.distributed as dist + +from vlmeval.config import supported_VLM +from vlmeval.dataset.video_dataset_config import supported_video_datasets +from vlmeval.dataset import build_dataset +from vlmeval.inference import infer_data_job +from vlmeval.inference_video import infer_data_job_video +from vlmeval.inference_mt import infer_data_job_mt +from vlmeval.smp import * +from vlmeval.utils.result_transfer import MMMU_result_transfer, MMTBench_result_transfer + +def build_model_from_config(cfg, model_name): + import vlmeval.api + import vlmeval.vlm + config = cp.deepcopy(cfg[model_name]) + if config == {}: + return supported_VLM[model_name]() + assert 'class' in config + cls_name = config.pop('class') + if hasattr(vlmeval.api, cls_name): + return getattr(vlmeval.api, cls_name)(**config) + elif hasattr(vlmeval.vlm, cls_name): + return getattr(vlmeval.vlm, cls_name)(**config) + else: + raise ValueError(f'Class {cls_name} is not supported in `vlmeval.api` or `vlmeval.vlm`') + + + +def parse_args(): + help_msg = """\ +You can launch the evaluation by setting either --data and --model or --config. + +--data and --model: + Each Arg should be a list of strings, specifying the names of datasets and models. + To find all supported model names, please refer to the `vlmeval/config.py` of check the output of the command \ + `vlmutil mlist all` in the terminal (you should first have vlmeval installed). + To find all supported dataset names, please refer to the `vlmeval/dataset/__init__.py` file. The python script \ + to print all supported dataset names is as follows: + ```python + from vlmeval.dataset import SUPPORTED_DATASETS + print(SUPPORTED_DATASETS) + ``` + or you can check the output of the command `vlmutil dlist all` in the terminal. + To find all supported video dataset default settings, please refer to the \ + `vlmeval/dataset/video_dataset_config.py` file. + +--config: + Launch the evaluation by specifying the path to the config json file. 
Sample Json Content: + ```json + { + "model": { + "GPT4o_20240806_T00_HIGH": { + "class": "GPT4V", + "model": "gpt-4o-2024-08-06", + "temperature": 0, + "img_detail": "high" + }, + "GPT4o_20240806_T10_Low": { + "class": "GPT4V", + "model": "gpt-4o-2024-08-06", + "temperature": 1.0, + "img_detail": "low" + }, + "GPT4o_20241120": {} + }, + "data": { + "MME-RealWorld-Lite": { + "class": "MMERealWorld", + "dataset": "MME-RealWorld-Lite" + }, + "MMBench_DEV_EN_V11": { + "class": "ImageMCQDataset", + "dataset": "MMBench_DEV_EN_V11" + }, + "MMBench_Video_8frame_nopack": {}, + "Video-MME_16frame_subs": { + "class": "VideoMME", + "dataset": "Video-MME", + "nframe": 16, + "use_subtitle": true, + } + } + } + ``` + Currently, only `model` and `data` are supported fields. The content of each field is a dictionary. + For `model`, the key is the name of the model, and the value is a dictionary containing the following keys: + - `class`: The class name of the model, which should be a class in `vlmeval.vlm` or `vlmeval.api`. + - Other keys are specific to the model, please refer to the corresponding class. + - Tip: The defined model in the `supported_VLM` of `vlmeval/config.py` can be used as a shortcut. + For `data`, the key is the name of the dataset (should be the same as the `dataset` field in most cases, \ + except for video datasets), and the value is a dictionary containing the following keys: + - `class`: The class name of the dataset, which should be a class in `vlmeval.dataset`. + - `dataset`: The name of the dataset, which should be a string that is accepted by the `dataset` argument of the \ + corresponding class. + - Other keys are specific to the dataset, please refer to the corresponding class. + - Tip: The defined dataset in the `supported_video_datasets` of `vlmeval/dataset/video_dataset_config.py` \ + can be used as a shortcut. + + The keys in the `model` and `data` fields will be used for naming the prediction files and evaluation results. + When launching with `--config`, args for API VLMs, such as `--retry`, `--verbose`, will be ignored. +""" + parser = argparse.ArgumentParser(description=help_msg, formatter_class=argparse.RawTextHelpFormatter) + # Essential Args, Setting the Names of Datasets and Models + parser.add_argument('--data', type=str, nargs='+', help='Names of Datasets') + parser.add_argument('--model', type=str, nargs='+', help='Names of Models') + parser.add_argument('--config', type=str, help='Path to the Config Json File') + # Work Dir + parser.add_argument('--work-dir', type=str, default='./outputs', help='select the output directory') + # Infer + Eval or Infer Only + parser.add_argument('--mode', type=str, default='all', choices=['all', 'infer']) + # API Kwargs, Apply to API VLMs and Judge API LLMs + parser.add_argument('--api-nproc', type=int, default=4, help='Parallel API calling') + parser.add_argument('--retry', type=int, default=None, help='retry numbers for API VLMs') + parser.add_argument('--judge-args', type=str, default=None, help='Judge arguments in JSON format') + # Explicitly Set the Judge Model + parser.add_argument('--judge', type=str, default=None) + # Logging Utils + parser.add_argument('--verbose', action='store_true') + # Configuration for Resume + # Ignore: will not rerun failed VLM inference + parser.add_argument('--ignore', action='store_true', help='Ignore failed indices. 
') + # Reuse: will reuse the existing prediction files + parser.add_argument('--reuse', action='store_true') + # Reuse-aux: if set, when reuse is True, will also reuse the auxiliary evaluation files + parser.add_argument('--reuse-aux', type=bool, default=True, help='reuse auxiliary evaluation files') + + args = parser.parse_args() + return args + + +def main(): + logger = get_logger('RUN') + rank, world_size = get_rank_and_world_size() + args = parse_args() + use_config, cfg = False, None + if args.config is not None: + assert args.model is None, '--model should not be set when using --config' + use_config, cfg = True, load(args.config) + args.model = list(cfg['model'].keys()) + else: + assert len(args.data), '--data should be a list of data files' + + if rank == 0: + if not args.reuse: + logger.warning('--reuse is not set, will not reuse previous (before one day) temporary files') + else: + logger.warning('--reuse is set, will reuse the latest prediction & temporary pickle files') + + if 'MMEVAL_ROOT' in os.environ: + args.work_dir = os.environ['MMEVAL_ROOT'] + + if not use_config: + for k, v in supported_VLM.items(): + if hasattr(v, 'keywords') and 'retry' in v.keywords and args.retry is not None: + v.keywords['retry'] = args.retry + supported_VLM[k] = v + if hasattr(v, 'keywords') and 'verbose' in v.keywords and args.verbose is not None: + v.keywords['verbose'] = args.verbose + supported_VLM[k] = v + + if world_size > 1: + local_rank = os.environ.get('LOCAL_RANK', 0) + torch.cuda.set_device(int(local_rank)) + dist.init_process_group( + backend='nccl', + timeout=datetime.timedelta(seconds=int(os.environ.get('DIST_TIMEOUT', 3600*2))) + ) + + for _, model_name in enumerate(args.model): + model = None + date, commit_id = timestr('day'), githash(digits=8) + eval_id = f"T{date}_G{commit_id}" + + pred_root = osp.join(args.work_dir, model_name, eval_id) + pred_root_meta = osp.join(args.work_dir, model_name) + os.makedirs(pred_root_meta, exist_ok=True) + + prev_pred_roots = ls(osp.join(args.work_dir, model_name), mode='dir') + if len(prev_pred_roots) and args.reuse: + prev_pred_roots.sort() + + if not osp.exists(pred_root): + os.makedirs(pred_root, exist_ok=True) + + if use_config: + model = build_model_from_config(cfg['model'], model_name) + + for _, dataset_name in enumerate(args.data): + if world_size > 1: + dist.barrier() + + try: + result_file_base = f'{model_name}_{dataset_name}.xlsx' + + dataset_kwargs = {} + if dataset_name in ['MMLongBench_DOC', 'DUDE', 'DUDE_MINI', 'SLIDEVQA', 'SLIDEVQA_MINI']: + dataset_kwargs['model'] = model_name + + # If distributed, first build the dataset on the main process for doing preparation works + if world_size > 1: + if rank == 0: + dataset = build_dataset(dataset_name, **dataset_kwargs) + dist.barrier() + + dataset = build_dataset(dataset_name, **dataset_kwargs) + if dataset is None: + logger.error(f'Dataset {dataset_name} is not valid, will be skipped. 
') + continue + + # Handling Multi-Turn Dataset + if dataset.TYPE == 'MT': + result_file_base = result_file_base.replace('.xlsx', '.tsv') + + result_file = osp.join(pred_root, result_file_base) + + # Reuse the previous prediction file if exists + if rank == 0 and len(prev_pred_roots): + prev_result_files = [] + prev_pkl_file_list = [] + for root in prev_pred_roots[::-1]: + if osp.exists(osp.join(root, result_file_base)): + if args.reuse_aux: + prev_result_files = fetch_aux_files(osp.join(root, result_file_base)) + else: + prev_result_files = [osp.join(root, result_file_base)] + break + elif commit_id in root and len(ls(root)) and root != pred_root: + temp_files = ls(root, match=[dataset_name, '.pkl']) + if len(temp_files): + prev_pkl_file_list.extend(temp_files) + break + if not args.reuse: + prev_result_files = [] + prev_pkl_file_list = [] + if len(prev_result_files): + for prev_result_file in prev_result_files: + src = prev_result_file + tgt = osp.join(pred_root, osp.basename(src)) + if not osp.exists(tgt): + shutil.copy(src, tgt) + logger.info(f'--reuse is set, will reuse the prediction file {src}.') + else: + logger.warning(f'File already exists: {tgt}') + + elif len(prev_pkl_file_list): + for fname in prev_pkl_file_list: + target_path = osp.join(pred_root, osp.basename(fname)) + if not osp.exists(target_path): + shutil.copy(fname, target_path) + logger.info(f'--reuse is set, will reuse the prediction pickle file {fname}.') + else: + logger.warning(f'File already exists: {target_path}') + + if world_size > 1: + dist.barrier() + + if model is None: + model = model_name # which is only a name + + # Perform the Inference + if dataset.MODALITY == 'VIDEO': + model = infer_data_job_video( + model, + work_dir=pred_root, + model_name=model_name, + dataset=dataset, + result_file_name=result_file_base, + verbose=args.verbose, + api_nproc=args.api_nproc) + elif dataset.TYPE == 'MT': + model = infer_data_job_mt( + model, + work_dir=pred_root, + model_name=model_name, + dataset=dataset, + verbose=args.verbose, + api_nproc=args.api_nproc, + ignore_failed=args.ignore) + else: + model = infer_data_job( + model, + work_dir=pred_root, + model_name=model_name, + dataset=dataset, + verbose=args.verbose, + api_nproc=args.api_nproc, + ignore_failed=args.ignore) + + # Set the judge kwargs first before evaluation or dumping + + judge_kwargs = { + 'nproc': args.api_nproc, + 'verbose': args.verbose, + 'retry': args.retry if args.retry is not None else 3, + **(json.loads(args.judge_args) if args.judge_args else {}), + } + + if args.retry is not None: + judge_kwargs['retry'] = args.retry + if args.judge is not None: + judge_kwargs['model'] = args.judge + else: + if dataset.TYPE in ['MCQ', 'Y/N', 'MCQ_MMMU_Pro'] or listinstr(['moviechat1k'], dataset_name.lower()): + if listinstr(['WeMath'], dataset_name): + judge_kwargs['model'] = 'gpt-4o-mini' + else: + judge_kwargs['model'] = 'chatgpt-0125' + elif listinstr(['MMVet', 'LLaVABench', 'MMBench_Video'], dataset_name): + judge_kwargs['model'] = 'gpt-4-turbo' + elif listinstr(['MathVista', 'MathVerse', 'MathVision', 'DynaMath', 'VL-RewardBench', 'LogicVista', 'MOAT'], dataset_name): # noqa: E501 + judge_kwargs['model'] = 'gpt-4o-mini' + elif listinstr(['MMLongBench', 'MMDU', 'DUDE', 'SLIDEVQA', 'MIA-Bench', 'WildVision', 'MMAlignBench'], dataset_name): # noqa: E501 + judge_kwargs['model'] = 'gpt-4o' + + if rank == 0: + logger.info(judge_kwargs) + + if world_size > 1: + dist.barrier() + + # Only Rank 0 handles the evaluation part + if rank == 0: + # Prepare Submission 
Files for MMMU_TEST AND MMT-Bench_ALL + if dataset_name in ['MMMU_TEST']: + result_json = MMMU_result_transfer(result_file) + logger.info(f'Transfer MMMU_TEST result to json for official evaluation, ' + f'json file saved in {result_json}') + continue + elif 'MMT-Bench_ALL' in dataset_name: + submission_file = MMTBench_result_transfer(result_file, **judge_kwargs) + logger.info(f'Extract options from prediction of MMT-Bench FULL split for official evaluation ' + f'(https://eval.ai/web/challenges/challenge-page/2328/overview), ' + f'submission file saved in {submission_file}') + continue + + # Skip the evaluation part if only infer + if args.mode == 'infer': + continue + + # Skip the evaluation part if the dataset evaluation is not supported or annotations are missing + if 'MLLMGuard_DS' in dataset_name: + logger.info('The evaluation of MLLMGuard_DS is not supported yet. ') + continue + elif 'AesBench_TEST' == dataset_name: + logger.info(f'The results are saved in {result_file}. ' + f'Please send it to the AesBench Team via huangyipo@hotmail.com.') + continue + elif dataset_name in ['DocVQA_TEST', 'InfoVQA_TEST', 'Q-Bench1_TEST', 'A-Bench_TEST']: + logger.info(f'{dataset_name} is a test split without ground-truth. ' + 'Thus only the inference part is supported for those datasets. ') + continue + elif dataset_name in [ + 'MMBench_TEST_CN', 'MMBench_TEST_EN', 'MMBench', 'MMBench_CN', + 'MMBench_TEST_CN_V11', 'MMBench_TEST_EN_V11', 'MMBench_V11', 'MMBench_CN_V11' + ] and not MMBenchOfficialServer(dataset_name): + logger.error( + f'Can not evaluate {dataset_name} on non-official servers, will skip the evaluation.') + continue + + # Setup the proxy for the evaluation + eval_proxy = os.environ.get('EVAL_PROXY', None) + old_proxy = os.environ.get('HTTP_PROXY', '') + if eval_proxy is not None: + proxy_set(eval_proxy) + + # Perform the Evaluation + eval_results = dataset.evaluate(result_file, **judge_kwargs) + # Display Evaluation Results in Terminal + if eval_results is not None: + assert isinstance(eval_results, dict) or isinstance(eval_results, pd.DataFrame) + logger.info(f'The evaluation of model {model_name} x dataset {dataset_name} has finished! 
') + logger.info('Evaluation Results:') + if isinstance(eval_results, dict): + logger.info('\n' + json.dumps(eval_results, indent=4)) + elif isinstance(eval_results, pd.DataFrame): + if len(eval_results) < len(eval_results.columns): + eval_results = eval_results.T + logger.info('\n' + tabulate(eval_results)) + + # Restore the proxy + if eval_proxy is not None: + proxy_set(old_proxy) + + # Create the symbolic links for the prediction files + files = os.listdir(pred_root) + files = [x for x in files if (f'{model_name}_{dataset_name}' in x or "status.json" in x)] + for f in files: + cwd = os.getcwd() + file_addr = osp.join(cwd, pred_root, f) + link_addr = osp.join(cwd, pred_root_meta, f) + if osp.exists(link_addr) or osp.islink(link_addr): + os.remove(link_addr) + os.symlink(file_addr, link_addr) + + except Exception as e: + logger.exception(f'Model {model_name} x Dataset {dataset_name} combination failed: {e}, ' + 'skipping this combination.') + continue + + if world_size > 1: + dist.destroy_process_group() + + +if __name__ == '__main__': + load_env() + main() diff --git a/scripts/AI2D_preproc.ipynb b/scripts/AI2D_preproc.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..f93b8a880e5fc636a40abc76ab6a59b9c3c7eeda --- /dev/null +++ b/scripts/AI2D_preproc.ipynb @@ -0,0 +1,261 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os, cv2\n", + "import string\n", + "import os.path as osp\n", + "import numpy as np\n", + "from collections import defaultdict\n", + "from vlmeval.smp import ls, load, dump, download_file, encode_image_file_to_base64, md5, mrlines\n", + "import pandas as pd\n", + "import matplotlib.pyplot as plt\n", + "import multiprocessing as mp\n", + "from PIL import Image, ImageFont, ImageDraw\n", + "\n", + "font_URL = 'http://opencompass.openxlab.space/utils/Fonts/timesb.ttf'\n", + "font_file = 'timesb.ttf'\n", + "if not osp.exists(font_file):\n", + " download_file(font_URL)\n", + " \n", + "test_split_URL = 'https://s3-us-east-2.amazonaws.com/prior-datasets/ai2d_test_ids.csv'\n", + "test_split_file = 'ai2d_test_ids.csv'\n", + "if not osp.exists(test_split_file):\n", + " download_file(test_split_URL)\n", + " \n", + "test_ids = set(mrlines(test_split_file))\n", + " \n", + "def proper_font_size(font_file, wh, text, ratio=1):\n", + " font_size = 2\n", + " while True:\n", + " font = ImageFont.truetype(font_file, font_size)\n", + " real_box = font.getbbox(text)\n", + " real_wh = (real_box[2] - real_box[0], real_box[3] - real_box[1])\n", + " if real_wh[0] > wh[0] * ratio or real_wh[1] > wh[1] * ratio:\n", + " break\n", + " font_size += 1\n", + " return font_size\n", + "\n", + "def cover_image(ann_path):\n", + " data = load(ann_path)\n", + " texts = list(data['text'].values())\n", + " raw_img = ann_path.replace('annotations', 'images').replace('.json', '')\n", + " tgt_img = raw_img.replace('images', 'images_abc')\n", + " img = Image.open(raw_img)\n", + " draw = ImageDraw.Draw(img)\n", + " for text in texts:\n", + " st, ed = tuple(text['rectangle'][0]), tuple(text['rectangle'][1])\n", + " T = text['replacementText']\n", + " draw.rectangle((st, ed), fill='white')\n", + " font_size = proper_font_size(font_file, (ed[0] - st[0], ed[1] - st[1]), T, ratio=1)\n", + " font = ImageFont.truetype(font_file, font_size)\n", + " text_box = font.getbbox(T)\n", + " text_wh = (text_box[2] - text_box[0], text_box[3] - text_box[1])\n", + " cx, cy = (st[0] + ed[0]) // 2, st[1]\n", + " stx = cx - text_wh[0] // 
2\n", + " sty = cy - text_wh[1] // 2\n", + " draw.text((stx, sty), T, font=font, fill='black')\n", + " img.save(tgt_img) " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Process for no mask images\n", + "test_ids = set(mrlines(test_split_file))\n", + "\n", + "def detect_image_color(image):\n", + " gray_image = image.convert('L')\n", + " mean_brightness = np.mean(np.array(gray_image))\n", + " if mean_brightness < 127:\n", + " return 'white'\n", + " else:\n", + " return 'black'\n", + "\n", + "def cover_image(ann_path):\n", + " data = load(ann_path)\n", + " texts = list(data['text'].values())\n", + " raw_img = ann_path.replace('annotations', 'images').replace('.json', '')\n", + " tgt_img = raw_img.replace('images', 'images_abc')\n", + " img = Image.open(raw_img)\n", + " draw = ImageDraw.Draw(img)\n", + " color = detect_image_color(img)\n", + " font_size = 0\n", + " for text in texts:\n", + " st, ed = tuple(text['rectangle'][0]), tuple(text['rectangle'][1])\n", + " font_size += (ed[1] - st[1])\n", + " if len(texts) != 0:\n", + " font_size /= len(texts)\n", + " else:\n", + " font_size = 2\n", + " for text in texts:\n", + " st, ed = tuple(text['rectangle'][0]), tuple(text['rectangle'][1])\n", + " T = text['replacementText']\n", + " for i in range(2):\n", + " draw.rectangle(\n", + " [(st[0] - i, st[1] - i), (ed[0] + i, ed[1] + i)],\n", + " outline=color\n", + " )\n", + " font = ImageFont.truetype(font_file, font_size)\n", + " text_box = font.getbbox(T)\n", + " text_wh = (text_box[2] - text_box[0], text_box[3] - text_box[1])\n", + " cx, cy = (st[0] + ed[0]) // 2, st[1]\n", + " stx = cx - text_wh[0] // 2\n", + " sty = cy - text_wh[1] * 1.5\n", + " if sty < 0:\n", + " sty = cy + text_wh[1] * 1.3\n", + " draw.text((stx, sty), T, font=font, fill=color)\n", + " img.save(tgt_img) " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "download_file('https://ai2-public-datasets.s3.amazonaws.com/diagrams/ai2d-all.zip')\n", + "os.system('unzip -o ai2d-all.zip')\n", + "\n", + "images = ls('ai2d/images/')\n", + "questions = ls('ai2d/questions/')\n", + "annotations = ls('ai2d/annotations/')\n", + "cates = load('ai2d/categories.json')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pool = mp.Pool(32)\n", + "pool.map(cover_image, annotations)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def puncproc(inText):\n", + " import re\n", + " outText = inText\n", + " punct = [\n", + " ';', r'/', '[', ']', '\"', '{', '}', '(', ')', '=', '+', '\\\\', '_', '-',\n", + " '>', '<', '@', '`', ',', '?', '!'\n", + " ]\n", + " commaStrip = re.compile('(\\d)(,)(\\d)') # noqa: W605\n", + " periodStrip = re.compile('(?!<=\\d)(\\.)(?!\\d)') # noqa: W605\n", + " for p in punct:\n", + " if (p + ' ' in inText or ' ' + p in inText) or (re.search(commaStrip, inText) is not None):\n", + " outText = outText.replace(p, '')\n", + " else:\n", + " outText = outText.replace(p, ' ')\n", + " outText = periodStrip.sub('', outText, re.UNICODE)\n", + " return outText\n", + "\n", + "def check_choices(line):\n", + " def ischar(s):\n", + " s = str(s)\n", + " if s in ['{}', 'Both', 'None of above']:\n", + " return True\n", + " elif s.startswith('Stage ') and ischar(s[6:]):\n", + " return True\n", + " elif ' and ' in s and np.all([ischar(x) for x in s.split(' and ')]):\n", + " return 
True\n", + " elif len(s) <= 2:\n", + " return True\n", + " elif len(puncproc(s).split()) > 1:\n", + " return np.all([ischar(x) for x in puncproc(s).split()])\n", + " return False\n", + " n_char = sum([ischar(line[x]) for x in 'ABCD'])\n", + " return n_char >= 3\n", + "\n", + "def check_question(question):\n", + " words = puncproc(question).split()\n", + " for ch in string.ascii_lowercase + string.ascii_uppercase:\n", + " if ch in words:\n", + " return True\n", + " return False\n", + "\n", + "def is_abc(abc, choices, question):\n", + " if abc == 0:\n", + " return False\n", + " if check_choices(choices):\n", + " return True\n", + " if check_question(question):\n", + " return True\n", + " return False" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data_all = defaultdict(list)\n", + "for qfile in questions:\n", + " data = load(qfile)\n", + " idx = data['imageName'].split('.')[0]\n", + " if idx not in test_ids:\n", + " continue\n", + " image_pth = qfile.replace('questions', 'images').replace('.json', '')\n", + " cate = cates[image_pth.split('/')[-1]]\n", + " for q, qmeta in data['questions'].items():\n", + " assert '.png-' in qmeta['questionId']\n", + " main, sub = qmeta['questionId'].split('.png-')\n", + " idx = int(main) * 100 + int(sub)\n", + " \n", + " answers = qmeta['answerTexts']\n", + " correct = qmeta['correctAnswer']\n", + " \n", + " data_all['index'].append(idx)\n", + " data_all['question'].append(q)\n", + " assert len(answers) == 4\n", + " for c, a in zip('ABCD', answers):\n", + " data_all[c].append(a)\n", + " data_all['answer'].append('ABCD'[qmeta['correctAnswer']])\n", + " data_all['category'].append(cate)\n", + " data_all['abcLabel'].append(qmeta['abcLabel'])\n", + " abc = is_abc(qmeta['abcLabel'], {x: data_all[x][-1] for x in 'ABCD'}, q)\n", + " # if qmeta['abcLabel'] and not abc:\n", + " # print(qmeta['abcLabel'], {x: data_all[x][-1] for x in 'ABCD'}, q)\n", + " data_all['image_path'].append(image_pth.replace('images', 'images_abc') if abc else image_pth)\n", + "data = pd.DataFrame(data_all)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "images = []\n", + "image_seen = {}\n", + "for idx, pth in zip(data['index'], data['image_path']):\n", + " images.append(encode_image_file_to_base64(pth))\n", + "\n", + "data['image'] = images\n", + "dump(data, 'AI2D_TEST.tsv')\n", + "print(md5('AI2D_TEST.tsv'))" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/scripts/apires_scan.py b/scripts/apires_scan.py new file mode 100644 index 0000000000000000000000000000000000000000..c6036625f534edc05b081a075f1e001e8eba3267 --- /dev/null +++ b/scripts/apires_scan.py @@ -0,0 +1,55 @@ +import sys +from vlmeval import * +from vlmeval.dataset import SUPPORTED_DATASETS +FAIL_MSG = 'Failed to obtain answer via API.' + +root = sys.argv[1] +if root[-1] in '/\\': + root = root[:-1] + +model_name = root.split('/')[-1] + +for d in SUPPORTED_DATASETS: + fname = f'{model_name}_{d}.xlsx' + pth = osp.join(root, fname) + if osp.exists(pth): + data = load(pth) + # Detect Failure + assert 'prediction' in data + data['prediction'] = [str(x) for x in data['prediction']] + fail = [FAIL_MSG in x for x in data['prediction']] + if sum(fail): + nfail = sum(fail) + ntot = len(fail) + print(f'Model {model_name} x Dataset {d}: {nfail} out of {ntot} failed. {nfail / ntot * 100: .2f}%. 
') + + eval_files = ls(root, match=f'{model_name}_{d}_') + eval_files = [x for x in eval_files if listinstr([f'{d}_openai', f'{d}_gpt'], x) and x.endswith('.xlsx')] + + if len(eval_files) == 0: + print(f'Model {model_name} x Dataset {d} openai missing') + continue + + assert len(eval_files) == 1 + eval_file = eval_files[0] + data = load(eval_file) + + if 'MMVet' in d: + bad = [x for x in data['log'] if 'All 5 retries failed.' in str(x)] + if len(bad): + print(f'Model {model_name} x Dataset {d} Evaluation: {len(bad)} out of {len(data)} failed.') + elif 'MathVista' in d: + bad = [x for x in data['res'] if FAIL_MSG in str(x)] + if len(bad): + print(f'Model {model_name} x Dataset {d} Evaluation: {len(bad)} out of {len(data)} failed.') + + elif d == 'LLaVABench': + sub = data[data['gpt4_score'] == -1] + sub = sub[sub['gpt4_score'] == -1] + if len(sub): + print(f'Model {model_name} x Dataset {d} Evaluation: {len(sub)} out of {len(data)} failed.') + else: + bad = [x for x in data['log'] if FAIL_MSG in str(x)] + if len(bad): + print(f'Model {model_name} x Dataset {d} Evaluation: {len(bad)} out of {len(data)} failed.') + \ No newline at end of file diff --git a/scripts/auto_run.py b/scripts/auto_run.py new file mode 100644 index 0000000000000000000000000000000000000000..f3cd1bbf3a230e98bdb1a0298f6322b82d62a859 --- /dev/null +++ b/scripts/auto_run.py @@ -0,0 +1,38 @@ +import argparse +from vlmeval.smp import * +from vlmeval.config import supported_VLM + +def is_api(x): + return getattr(supported_VLM[x].func, 'is_api', False) + +models = list(supported_VLM) +models = [x for x in models if 'fs' not in x] +models = [x for x in models if not is_api(x)] +exclude_list = ['cogvlm-grounding-generalist', 'emu2'] +models = [x for x in models if x not in exclude_list] + +def is_large(x): + return '80b' in x or 'emu2' in x or '34B' in x + +small_models = [x for x in models if not is_large(x)] +large_models = [x for x in models if is_large(x)] +models = small_models + large_models + +parser = argparse.ArgumentParser() +parser.add_argument('--data', type=str, nargs='+', required=True) +args = parser.parse_args() + +# Skip some models +models = [x for x in models if not listinstr(['MiniGPT', 'grounding-generalist'], x)] + +for m in models: + unknown_datasets = [x for x in args.data if not osp.exists(f'{m}/{m}_{x}.xlsx')] + if len(unknown_datasets) == 0: + continue + dataset_str = ' '.join(unknown_datasets) + if '80b' in m: + cmd = f'python run.py --data {dataset_str} --model {m}' + else: + cmd = f'bash run.sh --data {dataset_str} --model {m}' + print(cmd) + os.system(cmd) \ No newline at end of file diff --git a/scripts/cover.sh b/scripts/cover.sh new file mode 100644 index 0000000000000000000000000000000000000000..0a35c5086d3e22bfc447cb5e3eff0054ee6a254c --- /dev/null +++ b/scripts/cover.sh @@ -0,0 +1,4 @@ +#!/bin/bash +DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +cp $DIR/../config.py $DIR/../vlmeval/ +cp $DIR/../misc/* $DIR/../vlmeval/vlm/misc/ \ No newline at end of file diff --git a/scripts/data_browser.py b/scripts/data_browser.py new file mode 100644 index 0000000000000000000000000000000000000000..3d6ac06a03414be7f5bd8965f810253a5b08296d --- /dev/null +++ b/scripts/data_browser.py @@ -0,0 +1,174 @@ +""" +pip install gradio # proxy_on first +python vis_geochat_data.py +# browse data in http://127.0.0.1:10064 +""" + +import os +import io +import json +import copy +import time +import gradio as gr +import base64 +from PIL import Image +from io import BytesIO +from argparse import 
Namespace +# from llava import conversation as conversation_lib +from typing import Sequence +from vlmeval import * +from vlmeval.dataset import SUPPORTED_DATASETS, build_dataset + +SYS = "You are a helpful assistant. Your job is to faithfully translate all provided text into Chinese faithfully. " + +# Translator = SiliconFlowAPI(model='Qwen/Qwen2.5-7B-Instruct', system_prompt=SYS) +Translator = OpenAIWrapper(model='gpt-4o-mini', system_prompt=SYS) + + +def image_to_mdstring(image): + return f"![image](data:image/jpeg;base64,{image})" + + +def images_to_md(images): + return '\n\n'.join([image_to_mdstring(image) for image in images]) + + +def mmqa_display(question, target_size=2048): + question = {k.lower() if len(k) > 1 else k: v for k, v in question.items()} + keys = list(question.keys()) + keys = [k for k in keys if k not in ['index', 'image']] + + idx = question.pop('index', 'XXX') + text = f'\n- INDEX: {idx}\n' + + if 'image' in question: + images = question.pop('image') + if images[0] == '[' and images[-1] == ']': + images = eval(images) + else: + images = [images] + else: + images = question.pop('image_path') + if images[0] == '[' and images[-1] == ']': + images = eval(images) + else: + images = [images] + images = [encode_image_file_to_base64(x) for x in images] + + qtext = question.pop('question', None) + if qtext is not None: + text += f'- QUESTION: {qtext}\n' + + if 'A' in question: + text += f'- Choices: \n' + for k in string.ascii_uppercase: + if k in question: + text += f'\t-{k}: {question.pop(k)}\n' + answer = question.pop('answer', None) + + for k in question: + if not pd.isna(question[k]): + text += f'- {k.upper()}. {question[k]}\n' + + if answer is not None: + text += f'- ANSWER: {answer}\n' + + image_md = images_to_md(images) + + return text, image_md + + +def parse_args(): + parser = argparse.ArgumentParser() + # Essential Args, Setting the Names of Datasets and Models + parser.add_argument('--port', type=int, default=7860) + args = parser.parse_args() + return args + + +def gradio_app_vis_dataset(port=7860): + data, loaded_obj = None, {} + + def btn_submit_click(filename, ann_id): + if filename not in loaded_obj: + return filename_change(filename, ann_id) + nonlocal data + data_desc = gr.Markdown(f'Visualizing {filename}, {len(data)} samples in total. 
') + if ann_id < 0 or ann_id >= len(data): + return filename, ann_id, data_desc, gr.Markdown('Invalid Index'), gr.Markdown(f'Index out of range [0, {len(data) - 1}]') + item = data.iloc[ann_id] + text, image_md = mmqa_display(item) + return filename, ann_id, data_desc, image_md, text + + def btn_next_click(filename, ann_id): + return btn_submit_click(filename, ann_id + 1) + + # def translate_click(anno_en): + # return gr.Markdown(Translator.generate(anno_en)) + + def filename_change(filename, ann_id): + nonlocal data, loaded_obj + + def legal_filename(filename): + LMURoot = LMUDataRoot() + if filename in SUPPORTED_DATASETS: + return build_dataset(filename).data + elif osp.exists(filename): + data = load(filename) + assert 'index' in data and 'image' in data + image_map = {i: image for i, image in zip(data['index'], data['image'])} + for k, v in image_map.items(): + if (not isinstance(v, str) or len(v) < 64) and v in image_map: + image_map[k] = image_map[v] + data['image'] = [image_map[k] for k in data['index']] + return data + elif osp.exists(osp.join(LMURoot, filename)): + filename = osp.join(LMURoot, filename) + return legal_filename(filename) + else: + return None + + data = legal_filename(filename) + if data is None: + return filename, 0, gr.Markdown(''), gr.Markdown("File not found"), gr.Markdown("File not found") + + loaded_obj[filename] = data + return btn_submit_click(filename, 0) + + with gr.Blocks() as app: + + filename = gr.Textbox( + value='Dataset Name (supported by VLMEvalKit) or TSV FileName (Relative under `LMURoot` or Real Path)', + label='Dataset', + interactive=True, + visible=True) + + with gr.Row(): + ann_id = gr.Number(0, label='Sample Index (Press Enter)', interactive=True, visible=True) + btn_next = gr.Button("Next") + # btn_translate = gr.Button('CN Translate') + + with gr.Row(): + data_desc = gr.Markdown('Dataset Description', label='Dataset Description') + + with gr.Row(): + image_output = gr.Markdown('Image PlaceHolder', label='Image Visualization') + anno_en = gr.Markdown('Image Annotation', label='Image Annotation') + # anno_cn = gr.Markdown('Image Annotation (Chinese)', label='Image Annotation (Chinese)') + + input_components = [filename, ann_id] + all_components = [filename, ann_id, data_desc, image_output, anno_en] + + filename.submit(filename_change, input_components, all_components) + ann_id.submit(btn_submit_click, input_components, all_components) + btn_next.click(btn_next_click, input_components, all_components) + # btn_translate.click(translate_click, anno_en, anno_cn) + + # app.launch() + app.launch(server_name='0.0.0.0', debug=True, show_error=True, server_port=port) + + +if __name__ == "__main__": + args = parse_args() + gradio_app_vis_dataset(port=args.port) + diff --git a/scripts/run.sh b/scripts/run.sh new file mode 100644 index 0000000000000000000000000000000000000000..5dab509ccd6a6f39d528d2b63b4d19a92d1c1795 --- /dev/null +++ b/scripts/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash +set -x +export GPU=$(nvidia-smi --list-gpus | wc -l) +torchrun --nproc-per-node=$GPU run.py ${@:1} \ No newline at end of file diff --git a/scripts/srun.sh b/scripts/srun.sh new file mode 100644 index 0000000000000000000000000000000000000000..a9c697e4f530d2404d24469da9bf7689ed7ab346 --- /dev/null +++ b/scripts/srun.sh @@ -0,0 +1,3 @@ +#!/bin/bash +set -x +srun -n1 --ntasks-per-node=1 --partition $1 --gres=gpu:8 --quotatype=reserved --job-name vlmeval --cpus-per-task=64 torchrun --nproc-per-node=8 run.py ${@:2} \ No newline at end of file diff --git a/scripts/summarize.py 
b/scripts/summarize.py new file mode 100644 index 0000000000000000000000000000000000000000..faf08b7f6f9452a1e55d437291f837ab7bbf470c --- /dev/null +++ b/scripts/summarize.py @@ -0,0 +1,109 @@ +from vlmeval.smp import * +from vlmeval.dataset import SUPPORTED_DATASETS + +def get_score(model, dataset): + + file_name = f'{model}/{model}_{dataset}' + if listinstr([ + 'CCBench', 'MMBench', 'SEEDBench_IMG', 'MMMU', 'ScienceQA', + 'AI2D_TEST', 'MMStar', 'RealWorldQA', 'BLINK', 'VisOnlyQA-VLMEvalKit' + ], dataset): + file_name += '_acc.csv' + elif listinstr(['MME', 'Hallusion', 'LLaVABench'], dataset): + file_name += '_score.csv' + elif listinstr(['MMVet', 'MathVista'], dataset): + file_name += '_gpt-4-turbo_score.csv' + elif listinstr(['COCO', 'OCRBench'], dataset): + file_name += '_score.json' + else: + raise NotImplementedError + + if not osp.exists(file_name): + return {} + + data = load(file_name) + ret = {} + if dataset == 'CCBench': + ret[dataset] = data['Overall'][0] * 100 + elif dataset == 'MMBench': + for n, a in zip(data['split'], data['Overall']): + if n == 'dev': + ret['MMBench_DEV_EN'] = a * 100 + elif n == 'test': + ret['MMBench_TEST_EN'] = a * 100 + elif dataset == 'MMBench_CN': + for n, a in zip(data['split'], data['Overall']): + if n == 'dev': + ret['MMBench_DEV_CN'] = a * 100 + elif n == 'test': + ret['MMBench_TEST_CN'] = a * 100 + elif listinstr(['SEEDBench', 'ScienceQA', 'MMBench', 'AI2D_TEST', 'MMStar', 'RealWorldQA', 'BLINK'], dataset): + ret[dataset] = data['Overall'][0] * 100 + elif 'MME' == dataset: + ret[dataset] = data['perception'][0] + data['reasoning'][0] + elif 'MMVet' == dataset: + data = data[data['Category'] == 'Overall'] + ret[dataset] = float(data.iloc[0]['acc']) + elif 'HallusionBench' == dataset: + data = data[data['split'] == 'Overall'] + for met in ['aAcc', 'qAcc', 'fAcc']: + ret[dataset + f' ({met})'] = float(data.iloc[0][met]) + elif 'MMMU' in dataset: + data = data[data['split'] == 'validation'] + ret['MMMU (val)'] = float(data.iloc[0]['Overall']) * 100 + elif 'MathVista' in dataset: + data = data[data['Task&Skill'] == 'Overall'] + ret[dataset] = float(data.iloc[0]['acc']) + elif 'LLaVABench' in dataset: + data = data[data['split'] == 'overall'].iloc[0] + ret[dataset] = float(data['Relative Score (main)']) + elif 'OCRBench' in dataset: + ret[dataset] = data['Final Score'] + elif dataset == 'VisOnlyQA-VLMEvalKit': + for n, a in zip(data['split'], data['Overall']): + ret[f'VisOnlyQA-VLMEvalKit_{n}'] = a * 100 + + return ret + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, nargs='+', default=[]) + parser.add_argument("--model", type=str, nargs='+', required=True) + args = parser.parse_args() + return args + +def gen_table(models, datasets): + res = defaultdict(dict) + for m in models: + for d in datasets: + try: + res[m].update(get_score(m, d)) + except Exception as e: + logging.warning(f'{type(e)}: {e}') + logging.warning(f'Missing Results for Model {m} x Dataset {d}') + keys = [] + for m in models: + for d in res[m]: + keys.append(d) + keys = list(set(keys)) + keys.sort() + final = defaultdict(list) + for m in models: + final['Model'].append(m) + for k in keys: + if k in res[m]: + final[k].append(res[m][k]) + else: + final[k].append(None) + final = pd.DataFrame(final) + dump(final, 'summ.csv') + if len(final) >= len(final.iloc[0].keys()): + print(tabulate(final)) + else: + print(tabulate(final.T)) + +if __name__ == '__main__': + args = parse_args() + if args.data == []: + args.data = 
list(SUPPORTED_DATASETS) + gen_table(args.model, args.data) \ No newline at end of file diff --git a/scripts/visualize.ipynb b/scripts/visualize.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..84d08dcfa3b1648c07036c763f7a07b1b2289873 --- /dev/null +++ b/scripts/visualize.ipynb @@ -0,0 +1,266 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "import copy as cp\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import matplotlib.font_manager as fm\n", + "\n", + "def download_file(url, filename=None):\n", + " from urllib.request import urlretrieve\n", + " if filename is None:\n", + " filename = url.split('/')[-1]\n", + " urlretrieve(url, filename)\n", + "\n", + "font_URL = 'http://opencompass.openxlab.space/utils/Fonts/segoepr.ttf'\n", + "download_file(font_URL)\n", + "\n", + "font12 = fm.FontProperties(fname='segoepr.ttf', size=12)\n", + "font15 = fm.FontProperties(fname='segoepr.ttf', size=15, weight='bold')\n", + "font18 = fm.FontProperties(fname='segoepr.ttf', size=18, weight='bold')\n", + "\n", + "DATA_URL = 'http://opencompass.openxlab.space/utils/OpenVLM.json'\n", + "download_file(DATA_URL)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def pre_normalize(raw_data, labels):\n", + " data_list = cp.deepcopy(raw_data)\n", + " minimum, maximum, max_range, range_map = {}, {}, 0, {}\n", + " for lb in labels:\n", + " minimum[lb] = min([x[lb] for x in data_list])\n", + " maximum[lb] = max([x[lb] for x in data_list])\n", + " max_range = max(max_range, maximum[lb] - minimum[lb])\n", + " max_range *= 1.25\n", + " for lb in labels:\n", + " mid = (minimum[lb] + maximum[lb]) / 2\n", + " new_range = (mid - max_range / 2, mid + max_range / 2) if (mid + max_range / 2) < 100 else (100 - max_range, 100)\n", + " range_map[lb] = new_range\n", + " for item in data_list:\n", + " assert new_range[0] <= item[lb] <= new_range[1]\n", + " item[lb] = (item[lb] - new_range[0]) / max_range * 100\n", + " return data_list, range_map\n", + "\n", + "# solve the problem that some benchmark score is too high and out of range\n", + "def log_normalize(raw_data, labels):\n", + " data_list = cp.deepcopy(raw_data)\n", + " minimum, maximum, max_range, range_map = {}, {}, 0, {}\n", + " for lb in labels:\n", + " minimum[lb] = min([np.log(x[lb]) for x in data_list])\n", + " maximum[lb] = max([np.log(x[lb]) for x in data_list])\n", + " max_range = max(max_range, maximum[lb] - minimum[lb])\n", + " max_range *= 1.005\n", + " for lb in labels:\n", + " mid = (minimum[lb] + maximum[lb]) / 2\n", + " new_range = (mid - max_range / 2, mid + max_range / 2) if (mid + max_range / 2) < 100 else (100 - max_range, 100)\n", + " range_map[lb] = new_range\n", + " for item in data_list:\n", + " assert new_range[0] <= np.log(item[lb]) <= new_range[1]\n", + " item[lb] = (np.log(item[lb]) - new_range[0]) / max_range * 100\n", + " return data_list, range_map" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Draw MMBench Radar Graph\n", + "data = json.loads(open('OpenVLM.json').read())['results']\n", + "models = list(data)\n", + "print(models)\n", + "\n", + "# model2vis = [\n", + "# 'GPT-4v (detail: low)', 'GeminiProVision', 'Qwen-VL-Plus', \n", + "# 'InternLM-XComposer2-VL', 'LLaVA-v1.5-13B', 'CogVLM-17B-Chat',\n", + "# 'mPLUG-Owl2', 'Qwen-VL-Chat', 'IDEFICS-80B-Instruct'\n", + "# 
]\n", + "\n", + "model2vis = [\n", + " # 'GPT-4v (detail: low)', 'GeminiProVision', 'InternLM-XComposer2-VL', \n", + " 'GPT-4v (1106, detail-low)', 'Gemini-1.0-Pro', 'Gemini-1.5-Pro', #'Gemini-1.5-Flash', 'Qwen-VL-Plus', \n", + " 'InternLM-XComposer2', 'LLaVA-v1.5-13B', 'CogVLM-17B-Chat',\n", + " 'mPLUG-Owl2', 'Qwen-VL-Chat', 'IDEFICS-80B-Instruct'\n", + "]\n", + "\n", + "colors = [\n", + " '#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', \n", + " '#e377c2', '#7f7f7f', '#bcbd22'\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from collections import defaultdict\n", + "\n", + "split = 'MMBench_TEST_EN'\n", + "# data_sub = {k: v[split] for k, v in data.items()}\n", + "data_sub = {k: defaultdict(int, v)[split] for k, v in data.items()}\n", + "# solve the problem that some model lack the evaluation of MMBench_TEST_EN\n", + "\n", + "labels = list(data_sub[model2vis[0]])\n", + "labels.remove('Overall')\n", + "num_vars = len(labels)\n", + "\n", + "raw_data = [data_sub[m] for m in model2vis]\n", + "data_list, range_map = pre_normalize(raw_data, labels)\n", + "\n", + "alpha = 0.25\n", + "angles = np.linspace(0, 2 * np.pi, num_vars, endpoint=False).tolist()\n", + "angles_deg = np.linspace(0, 360, num_vars, endpoint=False).tolist()\n", + "fig, ax_base = plt.subplots(nrows=1, ncols=1, figsize=(10, 10), subplot_kw=dict(polar=True))\n", + "\n", + "for i in range(len(data_list)):\n", + " item = data_list[i]\n", + " model_name = model2vis[i]\n", + " color = colors[i]\n", + " tmp_angles = angles[:] + [angles[0]]\n", + " tmp_values = [item[lb] for lb in labels] + [item[labels[0]]]\n", + " ax_base.plot(tmp_angles, tmp_values, color=color, linewidth=1, linestyle='solid', label=model_name)\n", + " ax_base.fill(tmp_angles, tmp_values, color=color, alpha=alpha)\n", + " \n", + "angles += [angles[0]]\n", + "ax_base.set_ylim(0, 100)\n", + "ax_base.set_yticks([40, 60, 80, 100])\n", + "ax_base.set_yticklabels([''] * 4)\n", + "\n", + "ax_base.tick_params(pad=25)\n", + "ax_base.set_xticks(angles[:-1])\n", + "ax_base.set_xticklabels(labels, fontproperties=font18)\n", + "\n", + "leg = ax_base.legend(loc='center right', bbox_to_anchor=(1.6, 0.5), prop=font15, ncol=1, frameon=True, labelspacing=1.2)\n", + "for line in leg.get_lines():\n", + " line.set_linewidth(2.5)\n", + "\n", + "cx, cy, sz = 0.44, 0.435, 0.34\n", + "axes = [fig.add_axes([cx - sz, cy - sz, cx + sz, cy + sz], projection='polar', label='axes%d' % i) for i in range(num_vars)]\n", + " \n", + "for ax, angle, label in zip(axes, angles_deg, labels):\n", + " ax.patch.set_visible(False)\n", + " ax.grid(False)\n", + " ax.xaxis.set_visible(False)\n", + " cur_range = range_map[label]\n", + " label_list = [cur_range[0] + (cur_range[1] - cur_range[0]) / 5 * i for i in range(2, 6)]\n", + " label_list = [f'{x:.1f}' for x in label_list]\n", + " ax.set_rgrids(range(40, 120, 20), angle=angle, labels=label_list, font_properties=font12)\n", + " ax.spines['polar'].set_visible(False)\n", + " ax.set_ylim(0, 100)\n", + "\n", + "title_text = f'{len(model2vis)} Representative VLMs on MMBench Test.'\n", + "plt.figtext(.7, .95, title_text, fontproperties=font18, ha='center')\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "labels = ['SEEDBench_IMG', 'CCBench', 'MMBench_TEST_EN', 'MMBench_TEST_CN', 'MME', 'MMVet', 'MMMU_VAL', 'MathVista', 'HallusionBench', 'LLaVABench']\n", + "num_vars = len(labels)\n", + 
"\n", + "raw_data = [{k: data[m][k]['Overall'] for k in labels} for m in model2vis]\n", + "data_list, range_map = pre_normalize(raw_data, labels)\n", + "\n", + "alpha = 0.25\n", + "angles = np.linspace(0, 2 * np.pi, num_vars, endpoint=False).tolist()\n", + "angles_deg = np.linspace(0, 360, num_vars, endpoint=False).tolist()\n", + "fig, ax_base = plt.subplots(nrows=1, ncols=1, figsize=(10, 10), subplot_kw=dict(polar=True))\n", + "\n", + "for i in range(len(data_list)):\n", + " item = data_list[i]\n", + " model_name = model2vis[i]\n", + " color = colors[i]\n", + " tmp_angles = angles[:] + [angles[0]]\n", + " tmp_values = [item[lb] for lb in labels] + [item[labels[0]]]\n", + " ax_base.plot(tmp_angles, tmp_values, color=color, linewidth=1, linestyle='solid', label=model_name)\n", + " ax_base.fill(tmp_angles, tmp_values, color=color, alpha=alpha)\n", + " \n", + "angles += [angles[0]]\n", + "ax_base.set_ylim(0, 100)\n", + "ax_base.set_yticks([40, 60, 80, 100])\n", + "ax_base.set_yticklabels([''] * 4)\n", + "\n", + "ax_base.tick_params(pad=15)\n", + "ax_base.set_xticks(angles[:-1])\n", + "ax_base.set_xticklabels(labels, fontproperties=font18)\n", + "\n", + "dataset_map = {\n", + " 'MMBench_TEST_EN': 'MMBench (Test)', \n", + " 'MMBench_TEST_CN': 'MMBenchCN (Test)', \n", + " 'MathVista': 'MathVista (TestMini)', \n", + " 'MMMU_VAL': 'MMMU (Val)'\n", + "}\n", + "for i, label in enumerate(ax_base.get_xticklabels()):\n", + " x,y = label.get_position()\n", + " text = label.get_text()\n", + " text = dataset_map[text] if text in dataset_map else text\n", + " lab = ax_base.text(x, y, text, transform=label.get_transform(),\n", + " ha=label.get_ha(), va=label.get_va(), font_properties=font15)\n", + " lab.set_rotation(360 / num_vars * i + 270)\n", + " labels.append(lab)\n", + "ax_base.set_xticklabels([])\n", + "\n", + "leg = ax_base.legend(loc='center right', bbox_to_anchor=(1.6, 0.5), prop=font15, ncol=1, frameon=True, labelspacing=1.2)\n", + "for line in leg.get_lines():\n", + " line.set_linewidth(2.5)\n", + "\n", + "cx, cy, sz = 0.44, 0.435, 0.34\n", + "axes = [fig.add_axes([cx - sz, cy - sz, cx + sz, cy + sz], projection='polar', label='axes%d' % i) for i in range(num_vars)]\n", + " \n", + "for ax, angle, label in zip(axes, angles_deg, labels):\n", + " ax.patch.set_visible(False)\n", + " ax.grid(False)\n", + " ax.xaxis.set_visible(False)\n", + " cur_range = range_map[label]\n", + " label_list = [cur_range[0] + (cur_range[1] - cur_range[0]) / 5 * i for i in range(2, 6)]\n", + " label_list = [f'{x:.1f}' for x in label_list]\n", + " ax.set_rgrids(range(40, 120, 20), angle=angle, labels=label_list, font_properties=font12)\n", + " ax.spines['polar'].set_visible(False)\n", + " ax.set_ylim(0, 100)\n", + "\n", + "title_text = f'{len(model2vis)} Representative VLMs on {num_vars} Benchmarks in OpenCompass Multi-Modal Leaderboard.'\n", + "plt.figtext(.7, .95, title_text, fontproperties=font18, ha='center')\n", + "plt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "base", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..8b7a894e968c5f87c8563ecb6b9045f8296051c6 --- /dev/null +++ b/setup.py @@ -0,0 
+1,122 @@ +import re +import sys +from os.path import exists +from setuptools import find_packages, setup + + +def parse_requirements(fname='requirements.txt', with_version=True): + """Parse the package dependencies listed in a requirements file but strips + specific versioning information. + + Args: + fname (str): path to requirements file + with_version (bool, default=False): if True include version specs + + Returns: + List[str]: list of requirements items + + CommandLine: + python -c "import setup; print(setup.parse_requirements())" + """ + + require_fpath = fname + + def parse_line(line): + """Parse information from a line in a requirements text file.""" + if line.startswith('-r '): + # Allow specifying requirements in other files + target = line.split(' ')[1] + for info in parse_require_file(target): + yield info + else: + info = {'line': line} + if line.startswith('-e '): + info['package'] = line.split('#egg=')[1] + elif '@git+' in line: + info['package'] = line + else: + # Remove versioning from the package + pat = '(' + '|'.join(['>=', '==', '>']) + ')' + parts = re.split(pat, line, maxsplit=1) + parts = [p.strip() for p in parts] + + info['package'] = parts[0] + if len(parts) > 1: + op, rest = parts[1:] + if ';' in rest: + # Handle platform specific dependencies + # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies + version, platform_deps = map(str.strip, + rest.split(';')) + info['platform_deps'] = platform_deps + else: + version = rest # NOQA + info['version'] = (op, version) + yield info + + def parse_require_file(fpath): + with open(fpath, 'r') as f: + for line in f.readlines(): + line = line.strip() + if line and not line.startswith('#'): + for info in parse_line(line): + yield info + + def gen_packages_items(): + if exists(require_fpath): + for info in parse_require_file(require_fpath): + parts = [info['package']] + if with_version and 'version' in info: + parts.extend(info['version']) + if not sys.version.startswith('3.4'): + # apparently package_deps are broken in 3.4 + platform_deps = info.get('platform_deps') + if platform_deps is not None: + parts.append(';' + platform_deps) + item = ''.join(parts) + yield item + + packages = list(gen_packages_items()) + return packages + + +with open('README.md') as f: + readme = f.read() + + +def do_setup(): + setup( + name='vlmeval', + version='0.1.0', + description='OpenCompass VLM Evaluation Kit', + author='Haodong Duan', + author_email='dhd.efz@gmail.com', + maintainer='Haodong Duan', + maintainer_email='dhd.efz@gmail.com', + long_description=readme, + long_description_content_type='text/markdown', + cmdclass={}, + install_requires=parse_requirements('requirements.txt'), + setup_requires=[], + python_requires='>=3.7.0', + packages=find_packages(exclude=[ + 'test*', + 'paper_test*', + ]), + keywords=['AI', 'NLP', 'in-context learning'], + entry_points={ + 'console_scripts': ['vlmutil = vlmeval:cli'] + }, + classifiers=[ + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Intended Audience :: Developers', + 'Intended Audience :: Education', + 'Intended Audience :: Science/Research', + ]) + + +if __name__ == '__main__': + do_setup() diff --git a/test.ipynb b/test.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..2bbe1f6a8b63b4238e21ca9ef47ac9c89970f718 --- /dev/null +++ b/test.ipynb @@ -0,0 +1,86 @@ +{ + "cells": [ + { + "cell_type": 
"code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 2788/2788 [00:09<00:00, 286.38it/s]\n" + ] + } + ], + "source": [ + "import os\n", + "import json\n", + "save_dir = \"/user/konglingyu/VLMEvalKit/public_eval/dr_800_emma/EMMA_train_prompt_sampling/20250424\"\n", + "data = {}\n", + "for i in range(8):\n", + " assert os.path.exists(f\"{save_dir}/results_{i}.json\")\n", + " data.update(json.load(open(f\"{save_dir}/results_{i}.json\", \"r\")))\n", + "assert len(data) == 2788\n", + "with open(f\"{save_dir}/results.json\", \"w\") as f:\n", + " json.dump(data, f, indent=4)\n", + "from EMMA.evaluation.evaluate import gen_true_false\n", + "from EMMA.evaluation.calculate_acc import gen_score\n", + "gen_true_false(f\"{save_dir}/results.json\")\n", + "gen_score(f\"{save_dir}/results.json\", f\"{save_dir}/results_acc.json\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Looking in indexes: https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple, https://pypi.ngc.nvidia.com\n", + "Collecting word2number\n", + " Downloading https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4a/29/a31940c848521f0725f0df6b25dca8917f13a2025b0e8fcbe5d0457e45e6/word2number-1.1.zip (9.7 kB)\n", + " Preparing metadata (setup.py) ... \u001b[?25ldone\n", + "\u001b[?25hBuilding wheels for collected packages: word2number\n", + " Building wheel for word2number (setup.py) ... \u001b[?25ldone\n", + "\u001b[?25h Created wheel for word2number: filename=word2number-1.1-py3-none-any.whl size=5625 sha256=10743444572815e697e0ed1faa83d7468f568bad5e9f9683d681fd6de2a964cd\n", + " Stored in directory: /tmp/pip-ephem-wheel-cache-q98517p9/wheels/99/3a/6c/d8c11ef6bc6ecfba03cda750ca0ed469689c0494af888bc94b\n", + "Successfully built word2number\n", + "Installing collected packages: word2number\n", + "Successfully installed word2number-1.1\n", + "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager, possibly rendering your system unusable. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv. 
Use the --root-user-action option if you know what you are doing and want to suppress this warning.\u001b[0m\u001b[33m\n", + "\u001b[0m\u001b[31mERROR: Operation cancelled by user\u001b[0m\u001b[31m\n", + "\u001b[0m^C\n", + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "%pip install word2number -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/utils/count.py b/utils/count.py new file mode 100644 index 0000000000000000000000000000000000000000..b11d8f54b7ffd1fd71c07d0ebf84c3dfb9d7a042 --- /dev/null +++ b/utils/count.py @@ -0,0 +1,116 @@ +import jsonlines +import re +import os +from matplotlib import pyplot as plt + +def count_bbox(answer: str): + PATTERN = re.compile(r'\((.*?)\),\((.*?)\)') + bbox_num = len(re.findall(PATTERN, answer)) + return bbox_num + +save_ckpt_lst = [350] + +mmmu_box_ratio, mmmu_avg_box_num = [], [] +mathvista_box_ratio, mathvista_avg_box_num = [], [] +wtq_box_ratio, wtq_avg_box_num = [], [] +mmdocbench_box_ratio, mmdocbench_avg_box_num = [], [] +tablefact_box_ratio, tablefact_avg_box_num = [], [] + +for results in os.listdir("/user/xuqixin/checkpoints/qwen25_vl-7b/grpo_v7_exp10_qwen25_vl_sft_bbox_grpo_opensource_doc/results"): + for ckpt in save_ckpt_lst: + if not results.endswith(f"_{ckpt}.jsonl"): + continue + with jsonlines.open(f"/user/xuqixin/checkpoints/qwen25_vl-7b/grpo_v7_exp10_qwen25_vl_sft_bbox_grpo_opensource_doc/results/{results}") as reader: + total_num = 0 + total_bbox_num = 0 + bbox_sample_num = 0 + already_print = 0 + print("##############################################################################") + print(f"results: {results}") + for obj in reader: + total_num += 1 + answer = obj["output"] + bbox_num = count_bbox(answer) + total_bbox_num += bbox_num + if bbox_num > 0: + bbox_sample_num += 1 + if already_print < 5: + print(obj) + print("\n") + already_print += 1 + # print(f"results: {results}") + # print(f"total_num: {total_num}, total_bbox_num: {total_bbox_num}, bbox_sample_num: {bbox_sample_num}") + # print(f"average box num: {total_bbox_num / bbox_sample_num}, bbox num ratio: {bbox_sample_num / total_num}") + + if "mmmu" in results: + # maintain only 2 decimal places + mmmu_box_ratio.append(round(bbox_sample_num / total_num, 3)) + mmmu_avg_box_num.append(round(total_bbox_num / bbox_sample_num, 3)) + elif "mathvista" in results: + mathvista_box_ratio.append(round(bbox_sample_num / total_num, 3)) + mathvista_avg_box_num.append(round(total_bbox_num / bbox_sample_num, 3)) + elif "wtq" in results: + wtq_box_ratio.append(round(bbox_sample_num / total_num, 3)) + wtq_avg_box_num.append(round(total_bbox_num / bbox_sample_num, 3)) + elif "mmdocbench" in results: + mmdocbench_box_ratio.append(round(bbox_sample_num / total_num, 3)) + mmdocbench_avg_box_num.append(round(total_bbox_num / bbox_sample_num, 3)) + elif "tablefact" in results: + tablefact_box_ratio.append(round(bbox_sample_num / total_num, 3)) + tablefact_avg_box_num.append(round(total_bbox_num / bbox_sample_num, 3)) + + +# plot the figure +plt.figure(figsize=(10, 5)) +plt.subplot(1, 2, 1) 
+plt.plot(save_ckpt_lst, mmmu_box_ratio, label='mmmu') +plt.plot(save_ckpt_lst, mathvista_box_ratio, label='mathvista') +plt.plot(save_ckpt_lst, wtq_box_ratio, label='wtq') +plt.plot(save_ckpt_lst, mmdocbench_box_ratio, label='mmdocbench') +plt.plot(save_ckpt_lst, tablefact_box_ratio, label='tablefact') +plt.xlabel('Checkpoint') +plt.ylabel('Box Ratio') +plt.title('Box Ratio vs Checkpoint') +plt.legend() +plt.subplot(1, 2, 2) +plt.plot(save_ckpt_lst, mmmu_avg_box_num, label='mmmu') +plt.plot(save_ckpt_lst, mathvista_avg_box_num, label='mathvista') +plt.plot(save_ckpt_lst, wtq_avg_box_num, label='wtq') +plt.plot(save_ckpt_lst, mmdocbench_avg_box_num, label='mmdocbench') +plt.plot(save_ckpt_lst, tablefact_avg_box_num, label='tablefact') +plt.xlabel('Checkpoint') +plt.ylabel('Average Box Number') +plt.title('Average Box Number vs Checkpoint') +plt.legend() +plt.tight_layout() +plt.savefig('/user/xuqixin/checkpoints/qwen25_vl-7b/grpo_v7_exp10_qwen25_vl_sft_bbox_grpo_opensource_doc/box_ratio.png') +plt.show() + +print("##############################################################################") +print("mmmu box ratio:", mmmu_box_ratio) +print("mathvista box ratio:", mathvista_box_ratio) +print("wtq box ratio:", wtq_box_ratio) +print("mmdocbench box ratio:", mmdocbench_box_ratio) +print("tablefact box ratio:", tablefact_box_ratio) + +print("mmmu avg box num:", mmmu_avg_box_num) +print("mathvista avg box num:", mathvista_avg_box_num) +print("wtq avg box num:", wtq_avg_box_num) +print("mmdocbench avg box num:", mmdocbench_avg_box_num) +print("tablefact avg box num:", tablefact_avg_box_num) + +# results: mmmu_350.jsonl +# total_num: 894, total_bbox_num: 52, bbox_sample_num: 31 +# average box num: 1.6774193548387097, bbox num ratio: 0.03467561521252797 +# results: mathvista_350.jsonl +# total_num: 998, total_bbox_num: 157, bbox_sample_num: 134 +# average box num: 1.171641791044776, bbox num ratio: 0.1342685370741483 +# results: wtq_350.jsonl +# total_num: 868, total_bbox_num: 1015, bbox_sample_num: 738 +# average box num: 1.3753387533875339, bbox num ratio: 0.8502304147465438 +# results: mmdocbench_350.jsonl +# total_num: 1000, total_bbox_num: 517, bbox_sample_num: 409 +# average box num: 1.2640586797066016, bbox num ratio: 0.409 +# results: tablefact_350.jsonl +# total_num: 868, total_bbox_num: 816, bbox_sample_num: 597 +# average box num: 1.3668341708542713, bbox num ratio: 0.6877880184331797 \ No newline at end of file diff --git a/utils/datasource.py b/utils/datasource.py new file mode 100644 index 0000000000000000000000000000000000000000..1c64234191d00ad7c5f243d2a12a56e96af4003d --- /dev/null +++ b/utils/datasource.py @@ -0,0 +1,13 @@ +import json +from collections import defaultdict + +ds = defaultdict(int) + +with open('public_eval/bbox_step_300/MathVista_MINI/20250418/bbox_step_300/bbox_step_300_MathVista_MINI_bbox.json') as infile: + data = json.load(infile) + +for item in data: + source = item['source'] + ds[source] += 1 + +print(ds) \ No newline at end of file diff --git a/utils/plot.py b/utils/plot.py new file mode 100644 index 0000000000000000000000000000000000000000..af8a668cfe9edf29de4a450fe7af91d2d1a3b6d9 --- /dev/null +++ b/utils/plot.py @@ -0,0 +1,57 @@ +import re +from PIL import Image, ImageDraw + +def get_boxed_image(image, sub_text, alpha=0.3): + ''' + image: Image.Image. + sub_text: String like "(x1, y1), (x2, y2), (x3, y3), (x4, y4)", or it's illegal. + alpha: The transparency of the box. 
+ ''' + + bbox_list = [(int(x), int(y)) for x, y in re.findall(r'\((\d+),(\d+)\)', sub_text)] + + if len(bbox_list) % 2 != 0: + print(f"The coordinate string {sub_text} is illegal, as we extract {len(bbox_list)} coordinates from it!") + return None + + image = Image.open(image) + width, height = image.size + image = image.convert("RGBA") + overlay = Image.new("RGBA", image.size, (255, 0, 0, 0)) + + draw = ImageDraw.Draw(overlay) + red_color = (255, 0, 0, int(255 * alpha)) + border_width = max(1, int(min(image.size) * 0.005)) + + for i in range(0, len(bbox_list), 2): + x1, y1 = bbox_list[i] + x2, y2 = bbox_list[i + 1] + + x1, y1, x2, y2 = int(x1/999 * width), int(y1/999 * height), int(x2/999 * width), int(y2/999 * height) + + x1 = max(0, min(x1, width)) + y1 = max(0, min(y1, height)) + x2 = max(0, min(x2, width)) + y2 = max(0, min(y2, height)) + + if x2 <= x1 or y2 <= y1: + print(f"The normalized coordinates ({x1}, {y1}), ({x2}, {y2}) are illegal, as their positions are wrong!") + return None + + for i in range(border_width): + draw.rectangle( + [x1 - i, y1 - i, x2 + i, y2 + i], + outline=red_color + ) + + return Image.alpha_composite(image, overlay).convert("RGB") + +id = 1673 +image = f'public_eval/bbox_step_300/MathVerse_MINI/20250418/images/{id}.png' +text = '''This table presents information about a point A in relation to a reference point O, including its true bearing and compass direction.\n\nStep 1. Identify the column 'True Bearing'(190,269),(388,935), which contains the true bearing of point A from point O.\nStep 2. Locate the value in the column 'True Bearing': 32°.\nStep 3. Conclude that the true bearing of A from O is 32°.\n32°''' + +box_image = get_boxed_image(image=image, sub_text=text) +if box_image: + box_image.show() + box_image.save(image.replace('.png', '_bbox.png')) + diff --git a/utils/xlsx2json.py b/utils/xlsx2json.py new file mode 100644 index 0000000000000000000000000000000000000000..937ea7abe3d2537ab13b53c232c9bd880ccb001b --- /dev/null +++ b/utils/xlsx2json.py @@ -0,0 +1,41 @@ +import pandas as pd +import jsonlines +import re +import random + +def convert_excel_to_json(excel_file, json_file): + df = pd.read_excel(excel_file) + df = df.where(pd.notnull(df), None) + df.to_json(json_file, orient='records', lines=True, default_handler=str) + +def count_bbox(answer: str): + PATTERN = re.compile(r'\((.*?)\),\((.*?)\)') + bbox_num = len(re.findall(PATTERN, answer)) + return bbox_num + +xlsx_input = 'public_eval/bbox_step_300/MathVerse_MINI/20250418/bbox_step_300/bbox_step_300_MathVerse_MINI.xlsx' +json_output = xlsx_input.replace('.xlsx', '.jsonl') +convert_excel_to_json(xlsx_input, json_output) + +# Count bbox statistics in a single pass (the previous nested `for obj in reader` loop silently skipped the first record) +box_output = [] +total_num = 0 +total_bbox_num = 0 +bbox_sample_num = 0 +with jsonlines.open(json_output, 'r') as reader: + for obj in reader: + total_num += 1 + answer = obj["full_prediction"] + bbox_num = count_bbox(answer) + total_bbox_num += bbox_num + if bbox_num > 0: + bbox_sample_num += 1 + box_output.append(obj) + +random.shuffle(box_output) +with jsonlines.open(json_output.replace('.jsonl', '_bbox.jsonl'), 'w') as writer: + for obj in box_output: + writer.write(obj) + +print(f"total_num: {total_num}, total_bbox_num: {total_bbox_num}, bbox_sample_num: {bbox_sample_num}") +print(f"average box num: {total_bbox_num / bbox_sample_num}, bbox num ratio: {bbox_sample_num / total_num}") diff --git a/vlmeval/__init__.py b/vlmeval/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f14739fd39bd1c8c3dce45b8e609a00df9a1c91d 
--- /dev/null +++ b/vlmeval/__init__.py @@ -0,0 +1,16 @@ +try: + import torch +except ImportError: + pass + +from .smp import * +from .api import * +from .dataset import * +from .utils import * +from .vlm import * +from .config import * +from .tools import cli + +load_env() + +__version__ = '0.2rc1' diff --git a/vlmeval/api/__init__.py b/vlmeval/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7e786ae47eb34c48e764d5b8e393b631c5ca9db --- /dev/null +++ b/vlmeval/api/__init__.py @@ -0,0 +1,30 @@ +from .gpt import OpenAIWrapper, GPT4V +from .hf_chat_model import HFChatModel +from .gemini import GeminiWrapper, GeminiProVision +from .qwen_vl_api import QwenVLWrapper, QwenVLAPI, Qwen2VLAPI +from .qwen_api import QwenAPI +from .claude import Claude_Wrapper, Claude3V +from .reka import Reka +from .glm_vision import GLMVisionAPI +from .cloudwalk import CWWrapper +from .sensechat_vision import SenseChatVisionAPI +from .siliconflow import SiliconFlowAPI, TeleMMAPI +from .hunyuan import HunyuanVision +from .bailingmm import bailingMMAPI +from .bluelm_v_api import BlueLMWrapper, BlueLM_V_API +from .jt_vl_chat import JTVLChatAPI +from .taiyi import TaiyiAPI +from .lmdeploy import LMDeployAPI +from .taichu import TaichuVLAPI, TaichuVLRAPI +from .doubao_vl_api import DoubaoVL +from .mug_u import MUGUAPI + +__all__ = [ + 'OpenAIWrapper', 'HFChatModel', 'GeminiWrapper', 'GPT4V', + 'GeminiProVision', 'QwenVLWrapper', 'QwenVLAPI', 'QwenAPI', + 'Claude3V', 'Claude_Wrapper', 'Reka', 'GLMVisionAPI', + 'CWWrapper', 'SenseChatVisionAPI', 'HunyuanVision', 'Qwen2VLAPI', + 'BlueLMWrapper', 'BlueLM_V_API', 'JTVLChatAPI', 'bailingMMAPI', + 'TaiyiAPI', 'TeleMMAPI', 'SiliconFlowAPI', 'LMDeployAPI', + 'TaichuVLAPI', 'TaichuVLRAPI', 'DoubaoVL', "MUGUAPI" +] diff --git a/vlmeval/api/base.py b/vlmeval/api/base.py new file mode 100644 index 0000000000000000000000000000000000000000..98eef51809f8c414b22ed7fde36174a0181d064e --- /dev/null +++ b/vlmeval/api/base.py @@ -0,0 +1,289 @@ +import time +import random as rd +from abc import abstractmethod +import os.path as osp +import copy as cp +from ..smp import get_logger, parse_file, concat_images_vlmeval, LMUDataRoot, md5, decode_base64_to_image_file + + +class BaseAPI: + + allowed_types = ['text', 'image'] + INTERLEAVE = True + INSTALL_REQ = False + + def __init__(self, + retry=10, + wait=3, + system_prompt=None, + verbose=True, + fail_msg='Failed to obtain answer via API.', + **kwargs): + """Base Class for all APIs. + + Args: + retry (int, optional): The retry times for `generate_inner`. Defaults to 10. + wait (int, optional): The wait time after each failed retry of `generate_inner`. Defaults to 3. + system_prompt (str, optional): Defaults to None. + verbose (bool, optional): Defaults to True. + fail_msg (str, optional): The message to return when failed to obtain answer. + Defaults to 'Failed to obtain answer via API.'. + **kwargs: Other kwargs for `generate_inner`. + """ + + self.wait = wait + self.retry = retry + self.system_prompt = system_prompt + self.verbose = verbose + self.fail_msg = fail_msg + self.logger = get_logger('ChatAPI') + + if len(kwargs): + self.logger.info(f'BaseAPI received the following kwargs: {kwargs}') + self.logger.info('Will try to use them as kwargs for `generate`. ') + self.default_kwargs = kwargs + + @abstractmethod + def generate_inner(self, inputs, **kwargs): + """The inner function to generate the answer. 
+ + Returns: + tuple(int, str, str): ret_code, response, log + """ + self.logger.warning('For APIBase, generate_inner is an abstract method. ') + assert 0, 'generate_inner not defined' + ret_code, answer, log = None, None, None + # if ret_code is 0, means succeed + return ret_code, answer, log + + def working(self): + """If the API model is working, return True, else return False. + + Returns: + bool: If the API model is working, return True, else return False. + """ + self.old_timeout = None + if hasattr(self, 'timeout'): + self.old_timeout = self.timeout + self.timeout = 120 + + retry = 5 + while retry > 0: + ret = self.generate('hello') + if ret is not None and ret != '' and self.fail_msg not in ret: + if self.old_timeout is not None: + self.timeout = self.old_timeout + return True + retry -= 1 + + if self.old_timeout is not None: + self.timeout = self.old_timeout + return False + + def check_content(self, msgs): + """Check the content type of the input. Four types are allowed: str, dict, liststr, listdict. + + Args: + msgs: Raw input messages. + + Returns: + str: The message type. + """ + if isinstance(msgs, str): + return 'str' + if isinstance(msgs, dict): + return 'dict' + if isinstance(msgs, list): + types = [self.check_content(m) for m in msgs] + if all(t == 'str' for t in types): + return 'liststr' + if all(t == 'dict' for t in types): + return 'listdict' + return 'unknown' + + def preproc_content(self, inputs): + """Convert the raw input messages to a list of dicts. + + Args: + inputs: raw input messages. + + Returns: + list(dict): The preprocessed input messages. Will return None if failed to preprocess the input. + """ + if self.check_content(inputs) == 'str': + return [dict(type='text', value=inputs)] + elif self.check_content(inputs) == 'dict': + assert 'type' in inputs and 'value' in inputs + return [inputs] + elif self.check_content(inputs) == 'liststr': + res = [] + for s in inputs: + mime, pth = parse_file(s) + if mime is None or mime == 'unknown': + res.append(dict(type='text', value=s)) + else: + res.append(dict(type=mime.split('/')[0], value=pth)) + return res + elif self.check_content(inputs) == 'listdict': + for item in inputs: + assert 'type' in item and 'value' in item + mime, s = parse_file(item['value']) + if mime is None: + assert item['type'] == 'text', item['value'] + else: + assert mime.split('/')[0] == item['type'] + item['value'] = s + return inputs + else: + return None + + # May exceed the context windows size, so try with different turn numbers. + def chat_inner(self, inputs, **kwargs): + _ = kwargs.pop('dataset', None) + while len(inputs): + try: + return self.generate_inner(inputs, **kwargs) + except Exception as e: + if self.verbose: + self.logger.info(f'{type(e)}: {e}') + inputs = inputs[1:] + while len(inputs) and inputs[0]['role'] != 'user': + inputs = inputs[1:] + continue + return -1, self.fail_msg + ': ' + 'Failed with all possible conversation turns.', None + + def chat(self, messages, **kwargs1): + """The main function for multi-turn chatting. Will call `chat_inner` with the preprocessed input messages.""" + assert hasattr(self, 'chat_inner'), 'The API model should has the `chat_inner` method. 
' + for msg in messages: + assert isinstance(msg, dict) and 'role' in msg and 'content' in msg, msg + assert self.check_content(msg['content']) in ['str', 'dict', 'liststr', 'listdict'], msg + msg['content'] = self.preproc_content(msg['content']) + # merge kwargs + kwargs = cp.deepcopy(self.default_kwargs) + kwargs.update(kwargs1) + + answer = None + # a very small random delay [0s - 0.5s] + T = rd.random() * 0.5 + time.sleep(T) + + assert messages[-1]['role'] == 'user' + + for i in range(self.retry): + try: + ret_code, answer, log = self.chat_inner(messages, **kwargs) + if ret_code == 0 and self.fail_msg not in answer and answer != '': + if self.verbose: + print(answer) + return answer + elif self.verbose: + if not isinstance(log, str): + try: + log = log.text + except Exception as e: + self.logger.warning(f'Failed to parse {log} as an http response: {str(e)}. ') + self.logger.info(f'RetCode: {ret_code}\nAnswer: {answer}\nLog: {log}') + except Exception as err: + if self.verbose: + self.logger.error(f'An error occurred during try {i}: ') + self.logger.error(f'{type(err)}: {err}') + # delay before each retry + T = rd.random() * self.wait * 2 + time.sleep(T) + + return self.fail_msg if answer in ['', None] else answer + + def preprocess_message_with_role(self, message): + system_prompt = '' + new_message = [] + + for data in message: + assert isinstance(data, dict) + role = data.pop('role', 'user') + if role == 'system': + system_prompt += data['value'] + '\n' + else: + new_message.append(data) + + if system_prompt != '': + if self.system_prompt is None: + self.system_prompt = system_prompt + else: + self.system_prompt += '\n' + system_prompt + return new_message + + def generate(self, message, **kwargs1): + """The main function to generate the answer. Will call `generate_inner` with the preprocessed input messages. + + Args: + message: raw input messages. + + Returns: + str: The generated answer, or the fail message if no answer could be obtained. + """ + if self.check_content(message) == 'listdict': + message = self.preprocess_message_with_role(message) + + assert self.check_content(message) in ['str', 'dict', 'liststr', 'listdict'], f'Invalid input type: {message}' + message = self.preproc_content(message) + assert message is not None and self.check_content(message) == 'listdict' + for item in message: + assert item['type'] in self.allowed_types, f'Invalid input type: {item["type"]}' + + # merge kwargs + kwargs = cp.deepcopy(self.default_kwargs) + kwargs.update(kwargs1) + + answer = None + # a very small random delay [0s - 0.5s] + T = rd.random() * 0.5 + time.sleep(T) + + for i in range(self.retry): + try: + ret_code, answer, log = self.generate_inner(message, **kwargs) + if ret_code == 0 and self.fail_msg not in answer and answer != '': + if self.verbose: + print(answer) + return answer + elif self.verbose: + if not isinstance(log, str): + try: + log = log.text + except Exception as e: + self.logger.warning(f'Failed to parse {log} as an http response: {str(e)}. 
') + self.logger.info(f'RetCode: {ret_code}\nAnswer: {answer}\nLog: {log}') + except Exception as err: + if self.verbose: + self.logger.error(f'An error occured during try {i}: ') + self.logger.error(f'{type(err)}: {err}') + # delay before each retry + T = rd.random() * self.wait * 2 + time.sleep(T) + + return self.fail_msg if answer in ['', None] else answer + + def message_to_promptimg(self, message, dataset=None): + assert not self.INTERLEAVE + model_name = self.__class__.__name__ + import warnings + warnings.warn( + f'Model {model_name} does not support interleaved input. ' + 'Will use the first image and aggregated texts as prompt. ') + num_images = len([x for x in message if x['type'] == 'image']) + if num_images == 0: + prompt = '\n'.join([x['value'] for x in message if x['type'] == 'text']) + image = None + elif num_images == 1: + prompt = '\n'.join([x['value'] for x in message if x['type'] == 'text']) + image = [x['value'] for x in message if x['type'] == 'image'][0] + else: + prompt = '\n'.join([x['value'] if x['type'] == 'text' else '' for x in message]) + if dataset == 'BLINK': + image = concat_images_vlmeval( + [x['value'] for x in message if x['type'] == 'image'], + target_size=512) + else: + image = [x['value'] for x in message if x['type'] == 'image'][0] + return prompt, image diff --git a/vlmeval/api/claude.py b/vlmeval/api/claude.py new file mode 100644 index 0000000000000000000000000000000000000000..11f5ae5c7185be47c6d3e190756d211c679ae598 --- /dev/null +++ b/vlmeval/api/claude.py @@ -0,0 +1,146 @@ +from vlmeval.smp import * +from vlmeval.api.base import BaseAPI +from time import sleep +import base64 +import mimetypes +from PIL import Image + +alles_url = 'https://openxlab.org.cn/gw/alles-apin-hub/v1/claude/v1/text/chat' +alles_headers = { + 'alles-apin-token': '', + 'Content-Type': 'application/json' +} +official_url = 'https://api.anthropic.com/v1/messages' +official_headers = { + 'x-api-key': '', + 'anthropic-version': '2023-06-01', + 'content-type': 'application/json' +} + + +class Claude_Wrapper(BaseAPI): + + is_api: bool = True + + def __init__(self, + backend: str = 'alles', + model: str = 'claude-3-opus-20240229', + key: str = None, + retry: int = 10, + timeout: int = 60, + wait: int = 3, + system_prompt: str = None, + verbose: bool = True, + temperature: float = 0, + max_tokens: int = 2048, + **kwargs): + + if os.environ.get('ANTHROPIC_BACKEND', '') == 'official': + backend = 'official' + + assert backend in ['alles', 'official'], f'Invalid backend: {backend}' + self.backend = backend + self.url = alles_url if backend == 'alles' else official_url + self.model = model + self.temperature = temperature + self.max_tokens = max_tokens + self.headers = alles_headers if backend == 'alles' else official_headers + self.timeout = timeout + + if key is not None: + self.key = key + else: + self.key = os.environ.get('ALLES', '') if self.backend == 'alles' else os.environ.get('ANTHROPIC_API_KEY', '') # noqa: E501 + + if self.backend == 'alles': + self.headers['alles-apin-token'] = self.key + else: + self.headers['x-api-key'] = self.key + + super().__init__(retry=retry, wait=wait, verbose=verbose, system_prompt=system_prompt, **kwargs) + + def encode_image_file_to_base64(self, image_path, target_size=-1, fmt='.jpg'): + image = Image.open(image_path) + if fmt in ('.jpg', '.jpeg'): + format = 'JPEG' + elif fmt == '.png': + format = 'PNG' + else: + print(f'Unsupported image format: {fmt}, will cause media type match error.') + + return encode_image_to_base64(image, 
target_size=target_size, fmt=format) + + # inputs can be a lvl-2 nested list: [content1, content2, content3, ...] + # content can be a string or a list of image & text + def prepare_itlist(self, inputs): + assert np.all([isinstance(x, dict) for x in inputs]) + has_images = np.sum([x['type'] == 'image' for x in inputs]) + if has_images: + content_list = [] + for msg in inputs: + if msg['type'] == 'text' and msg['value'] != '': + content_list.append(dict(type='text', text=msg['value'])) + elif msg['type'] == 'image': + pth = msg['value'] + suffix = osp.splitext(pth)[-1].lower() + media_type = mimetypes.types_map.get(suffix, None) + assert media_type is not None + + content_list.append(dict( + type='image', + source={ + 'type': 'base64', + 'media_type': media_type, + 'data': self.encode_image_file_to_base64(pth, target_size=4096, fmt=suffix) + })) + else: + assert all([x['type'] == 'text' for x in inputs]) + text = '\n'.join([x['value'] for x in inputs]) + content_list = [dict(type='text', text=text)] + return content_list + + def prepare_inputs(self, inputs): + input_msgs = [] + assert isinstance(inputs, list) and isinstance(inputs[0], dict) + assert np.all(['type' in x for x in inputs]) or np.all(['role' in x for x in inputs]), inputs + if 'role' in inputs[0]: + assert inputs[-1]['role'] == 'user', inputs[-1] + for item in inputs: + input_msgs.append(dict(role=item['role'], content=self.prepare_itlist(item['content']))) + else: + input_msgs.append(dict(role='user', content=self.prepare_itlist(inputs))) + return input_msgs + + def generate_inner(self, inputs, **kwargs) -> str: + payload = { + 'model': self.model, + 'max_tokens': self.max_tokens, + 'messages': self.prepare_inputs(inputs), + **kwargs + } + if self.system_prompt is not None: + payload['system'] = self.system_prompt + + response = requests.request('POST', self.url, headers=self.headers, data=json.dumps(payload), timeout=self.timeout * 1.1) + ret_code = response.status_code + ret_code = 0 if (200 <= int(ret_code) < 300) else ret_code + answer = self.fail_msg + + try: + resp_struct = json.loads(response.text) + if self.backend == 'alles': + answer = resp_struct['data']['content'][0]['text'].strip() + elif self.backend == 'official': + answer = resp_struct['content'][0]['text'].strip() + except Exception as err: + if self.verbose: + self.logger.error(f'{type(err)}: {err}') + self.logger.error(response.text if hasattr(response, 'text') else response) + + return ret_code, answer, response + + +class Claude3V(Claude_Wrapper): + + def generate(self, message, dataset=None): + return super(Claude_Wrapper, self).generate(message) diff --git a/vlmeval/api/cloudwalk.py b/vlmeval/api/cloudwalk.py new file mode 100644 index 0000000000000000000000000000000000000000..56b6ca2ec4e85726894e75e3c87b65e5402ad328 --- /dev/null +++ b/vlmeval/api/cloudwalk.py @@ -0,0 +1,107 @@ +from ..smp import * +import os +from .base import BaseAPI + + +class CWWrapper(BaseAPI): + + is_api: bool = True + + def __init__(self, + model: str = 'cw-congrong-v1.5', + retry: int = 10, + wait: int = 5, + key: str = None, + verbose: bool = True, + system_prompt: str = None, + temperature: float = 0, + timeout: int = 600, + api_base: str = 'http://cwapi-vlm01.cw_rb.azurebot.tk/v1/chat/completions', + max_tokens: int = 2048, + img_size: int = 512, + img_detail: str = 'low', + **kwargs): + + self.model = model + self.cur_idx = 0 + self.fail_msg = 'Failed to obtain answer via API. 
' + self.max_tokens = max_tokens + self.temperature = temperature + + base = os.environ.get('CW_API_BASE', None) + self.api_base = base if base is not None else api_base + + env_key = os.environ.get('CW_API_KEY', None) + self.key = env_key if env_key is not None else key + assert self.key is not None, 'API key not provided. Please set CW_API_KEY environment variable or \ + pass it to the constructor.' + + assert img_size > 0 or img_size == -1 + self.img_size = -1 # allways send full size image + assert img_detail in ['high', 'low'] + self.img_detail = img_detail + + self.vision = True + self.timeout = timeout + + super().__init__(wait=wait, retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs) + + # inputs can be a lvl-2 nested list: [content1, content2, content3, ...] + # content can be a string or a list of image & text + def prepare_inputs(self, inputs): + input_msgs = [] + if self.system_prompt is not None: + input_msgs.append(dict(role='system', content=self.system_prompt)) + has_images = np.sum([x['type'] == 'image' for x in inputs]) + if has_images: + content_list = [] + for msg in inputs: + if msg['type'] == 'text': + content_list.append(dict(type='text', text=msg['value'])) + elif msg['type'] == 'image': + from PIL import Image + img = Image.open(msg['value']) + b64 = encode_image_to_base64(img, target_size=self.img_size) + img_struct = dict(url=f"data:image/jpeg;base64,{b64}", detail=self.img_detail) + content_list.append(dict(type='image_url', image_url=img_struct)) + input_msgs.append(dict(role='user', content=content_list)) + else: + assert all([x['type'] == 'text' for x in inputs]) + text = '\n'.join([x['value'] for x in inputs]) + input_msgs.append(dict(role='user', content=text)) + return input_msgs + + def generate_inner(self, inputs, **kwargs) -> str: + input_msgs = self.prepare_inputs(inputs) + temperature = kwargs.pop('temperature', self.temperature) + max_tokens = kwargs.pop('max_tokens', self.max_tokens) + + if 0 < max_tokens <= 100: + self.logger.warning( + 'Less than 100 tokens left, ' + 'may exceed the context window with some additional meta symbols. ' + ) + if max_tokens <= 0: + return 0, self.fail_msg + 'Input string longer than context window. ', 'Length Exceeded. 
' + headers = {'Content-Type': 'application/json', 'Authorization': f'{self.key}'} + payload = dict( + model=self.model, + messages=input_msgs, + max_tokens=max_tokens, + n=1, + temperature=temperature, + **kwargs) + response = requests.post(self.api_base, headers=headers, data=json.dumps(payload), timeout=self.timeout * 1.1) + ret_code = response.status_code + ret_code = 0 if (200 <= int(ret_code) < 300) else ret_code + answer = self.fail_msg + try: + resp_struct = json.loads(response.text) + answer = resp_struct['choices'][0]['message']['content'].strip() + except Exception as err: + if self.verbose: + self.logger.error(f'{type(err)}: {err}') + self.logger.error(response.text if hasattr(response, 'text') else response) + + return ret_code, answer, response diff --git a/vlmeval/api/doubao_vl_api.py b/vlmeval/api/doubao_vl_api.py new file mode 100644 index 0000000000000000000000000000000000000000..9f69cfd783c63e558aa4e49198fcb4dd2020d82d --- /dev/null +++ b/vlmeval/api/doubao_vl_api.py @@ -0,0 +1,206 @@ +from vlmeval.smp import * +import os +import sys +from vlmeval.api.base import BaseAPI +import math +from vlmeval.dataset import DATASET_TYPE +from vlmeval.dataset import img_root_map +from io import BytesIO +import pandas as pd +import requests +import json +import base64 +import time +from openai import OpenAI + + +class DoubaoVLWrapper(BaseAPI): + + is_api: bool = True + + def __init__(self, + model: str = '', + retry: int = 5, + wait: int = 5, + verbose: bool = True, + system_prompt: str = None, + temperature: float = 0, + timeout: int = 60, + max_tokens: int = 4096, + api_base: str = 'https://ark.cn-beijing.volces.com/api/v3',  # use the service region endpoint recommended by the platform + **kwargs): + + self.model = model  # This variable is unused + self.cur_idx = 0 + self.fail_msg = 'Failed to obtain answer via API. ' + self.temperature = temperature + self.max_tokens = max_tokens + + warnings.warn('You may need to set the env variables DOUBAO_VL_KEY & DOUBAO_VL_ENDPOINT to use DOUBAO_VL.') + + key = os.environ.get('DOUBAO_VL_KEY', None) + assert key is not None, 'Please set the environment variable DOUBAO_VL_KEY. ' + self.key = key + + endpoint = os.getenv('DOUBAO_VL_ENDPOINT', None) + assert endpoint is not None, 'Please set the environment variable DOUBAO_VL_ENDPOINT. ' + self.endpoint = endpoint + + assert api_base is not None, 'Please set the variable API_BASE. ' + self.api_base = api_base + self.timeout = timeout + + super().__init__(wait=wait, retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs) + + + self.client = OpenAI( + api_key=self.key, + base_url=self.api_base, + ) + + self.logger.info(f'Using API Base: {self.api_base}; End Point: {self.endpoint}; API Key: {self.key}') + + def dump_image(self, line, dataset): + """Dump the image(s) of the input line to the corresponding dataset folder. + + Args: + line (line of pd.DataFrame): The raw input line. + dataset (str): The name of the dataset. + + Returns: + str | list[str]: The paths of the dumped images. 
+ """ + ROOT = LMUDataRoot() + assert isinstance(dataset, str) + + img_root = os.path.join(ROOT, 'images', img_root_map(dataset) if dataset in img_root_map(dataset) else dataset) + os.makedirs(img_root, exist_ok=True) + if 'image' in line: + if isinstance(line['image'], list): + tgt_path = [] + assert 'image_path' in line + for img, im_name in zip(line['image'], line['image_path']): + path = osp.join(img_root, im_name) + if not read_ok(path): + decode_base64_to_image_file(img, path) + tgt_path.append(path) + else: + tgt_path = osp.join(img_root, f"{line['index']}.jpg") + if not read_ok(tgt_path): + decode_base64_to_image_file(line['image'], tgt_path) + tgt_path = [tgt_path] + else: + assert 'image_path' in line + tgt_path = toliststr(line['image_path']) + + return tgt_path + + def use_custom_prompt(self, dataset_name): + if dataset_name == 'MathVerse_MINI_Vision_Only': + return True + else: + return False + + + def build_prompt(self, line, dataset: str) -> list[dict[str, str]]: + + if dataset in {'MathVerse_MINI_Vision_Only'}: + return self. _build_mathVerse_mini_vision_only_prompt(line, dataset) + raise ValueError(f'Unsupported dataset: {dataset}') + + def _build_mathVerse_mini_vision_only_prompt(self, line, dataset=None): + assert self.use_custom_prompt(dataset) + assert dataset is None or isinstance(dataset, str) + + tgt_path = self.dump_image(line, dataset) + + question = line['question'] + + ###remove 'directly' from the prompt, so the model will answer the question in Chain-of-Thought (CoT) manner + prompt = question.replace('directly','',1) + + msgs = [] + if isinstance(tgt_path, list): + msgs.extend([dict(type='image', value=p) for p in tgt_path]) + else: + msgs = [dict(type='image', value=tgt_path)] + msgs.append(dict(type='text', value=prompt)) + return msgs + + # inputs can be a lvl-2 nested list: [content1, content2, content3, ...] 
+ # content can be a string or a list of image & text + def prepare_itlist(self, inputs): + assert np.all([isinstance(x, dict) for x in inputs]) + has_images = np.sum([x['type'] == 'image' for x in inputs]) + if has_images: + content_list = [] + for msg in inputs: + if msg['type'] == 'text': + content_list.append(dict(type='text', text=msg['value'])) + elif msg['type'] == 'image': + from PIL import Image + img = Image.open(msg['value']) + b64 = encode_image_to_base64(img) + img_struct = dict(url=f'data:image/jpeg;base64,{b64}') + content_list.append(dict(type='image_url', image_url=img_struct)) + else: + assert all([x['type'] == 'text' for x in inputs]) + text = '\n'.join([x['value'] for x in inputs]) + content_list = [dict(type='text', text=text)] + return content_list + + def prepare_inputs(self, inputs): + input_msgs = [] + if self.system_prompt is not None: + input_msgs.append(dict(role='system', content=self.system_prompt)) + assert isinstance(inputs, list) and isinstance(inputs[0], dict) + assert np.all(['type' in x for x in inputs]) or np.all(['role' in x for x in inputs]), inputs + if 'role' in inputs[0]: + assert inputs[-1]['role'] == 'user', inputs[-1] + for item in inputs: + input_msgs.append(dict(role=item['role'], content=self.prepare_itlist(item['content']))) + else: + input_msgs.append(dict(role='user', content=self.prepare_itlist(inputs))) + return input_msgs + + def generate_inner(self, inputs, **kwargs) -> str: + + + input_msgs = self.prepare_inputs(inputs) + temperature = kwargs.pop('temperature', self.temperature) + max_tokens = kwargs.pop('max_tokens', self.max_tokens) + + ret_code = -1 + answer = self.fail_msg + response = None + try: + response = self.client.chat.completions.create( + model=self.endpoint, + messages=input_msgs, + max_tokens=max_tokens, + temperature=temperature + ) + answer = response.choices[0].message.content.strip() + ret_code = 0 + except Exception as err: + if self.verbose: + self.logger.error(f'{type(err)}: {err}') + self.logger.error(response.text if hasattr(response, 'text') else response) + + return ret_code, answer, response + +class DoubaoVL(DoubaoVLWrapper): + + def generate(self, message, dataset=None): + return super(DoubaoVL, self).generate(message) + +if __name__ == '__main__': + #export DOUBAO_VL_KEY='' + #export DOUBAO_VL_ENDPOINT='' + model = DoubaoVLWrapper( verbose=True) + inputs = [ + {'type': 'image', 'value': './assets/apple.jpg'}, + {'type': 'text', 'value': '请详细描述一下这张图片。'}, + ] + code, answer, resp = model.generate_inner(inputs) + print(code, answer, resp) diff --git a/vlmeval/api/glm_vision.py b/vlmeval/api/glm_vision.py new file mode 100644 index 0000000000000000000000000000000000000000..472e51f90de941fa73e726d29b0bc813216c31cf --- /dev/null +++ b/vlmeval/api/glm_vision.py @@ -0,0 +1,77 @@ +import requests +requests.packages.urllib3.disable_warnings() + +from vlmeval.smp import * +from vlmeval.api.base import BaseAPI +from vlmeval.dataset import DATASET_TYPE +from vlmeval.smp.vlm import encode_image_file_to_base64 + + +class GLMVisionWrapper(BaseAPI): + + is_api: bool = True + + def __init__(self, + model: str, + retry: int = 5, + wait: int = 5, + key: str = None, + verbose: bool = True, + system_prompt: str = None, + max_tokens: int = 4096, + proxy: str = None, + **kwargs): + + from zhipuai import ZhipuAI + self.model = model + self.fail_msg = 'Failed to obtain answer via API. 
' + if key is None: + key = os.environ.get('GLMV_API_KEY', None) + assert key is not None, ( + 'Please set the API Key (obtain it here: ' + 'https://bigmodel.cn)' + ) + self.client = ZhipuAI(api_key=key) + super().__init__(wait=wait, retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs) + + def build_msgs(self, msgs_raw, system_prompt=None, dataset=None): + msgs = cp.deepcopy(msgs_raw) + content = [] + for i, msg in enumerate(msgs): + if msg['type'] == 'text': + content.append(dict(type='text', text=msg['value'])) + elif msg['type'] == 'image': + content.append(dict(type='image_url', image_url=dict(url=encode_image_file_to_base64(msg['value'])))) + if dataset in {'HallusionBench', 'POPE'}: + content.append(dict(type="text", text="Please answer yes or no.")) + ret = [dict(role='user', content=content)] + return ret + + def generate_inner(self, inputs, **kwargs) -> str: + assert isinstance(inputs, str) or isinstance(inputs, list) + inputs = [inputs] if isinstance(inputs, str) else inputs + + messages = self.build_msgs(msgs_raw=inputs, dataset=kwargs.get('dataset', None)) + + response = self.client.chat.completions.create( + model=self.model, + messages=messages, + do_sample=False, + max_tokens=2048 + ) + try: + answer = response.choices[0].message.content.strip() + if self.verbose: + self.logger.info(f'inputs: {inputs}\nanswer: {answer}') + return 0, answer, 'Succeeded!' + except Exception as err: + if self.verbose: + self.logger.error(f'{type(err)}: {err}') + self.logger.error(f'The input messages are {inputs}.') + return -1, self.fail_msg, '' + + +class GLMVisionAPI(GLMVisionWrapper): + + def generate(self, message, dataset=None): + return super(GLMVisionAPI, self).generate(message, dataset=dataset) diff --git a/vlmeval/api/gpt.py b/vlmeval/api/gpt.py new file mode 100644 index 0000000000000000000000000000000000000000..c96e74a3ad600257b87f2d9e667a33561190e617 --- /dev/null +++ b/vlmeval/api/gpt.py @@ -0,0 +1,294 @@ +from ..smp import * +import os +import sys +from .base import BaseAPI +from .llm_center_gpt import ChatClient +import code, traceback, signal + +APIBASES = { + 'OFFICIAL': 'https://api.openai.com/v1/chat/completions', +} + +map_to_llm_center_code = { + 'gpt-4-1106-preview': 36, + #"gpt-4-turbo": 36, + "gpt-3.5-turbo-0125": 136, + "gpt-4o-mini-2024-07-18": 152 + +} + + +def GPT_context_window(model): + length_map = { + 'gpt-4': 8192, + 'gpt-4-0613': 8192, + 'gpt-4-turbo-preview': 128000, + 'gpt-4-1106-preview': 128000, + 'gpt-4-0125-preview': 128000, + 'gpt-4-vision-preview': 128000, + 'gpt-4-turbo': 128000, + 'gpt-4-turbo-2024-04-09': 128000, + 'gpt-3.5-turbo': 16385, + 'gpt-3.5-turbo-0125': 16385, + 'gpt-3.5-turbo-1106': 16385, + 'gpt-3.5-turbo-instruct': 4096, + } + if model in length_map: + return length_map[model] + else: + return 128000 + +class OpenAIWrapper(BaseAPI): + + is_api: bool = True + + def __init__(self, + model: str = 'gpt-3.5-turbo-0613', + retry: int = 5, + wait: int = 5, + key: str = None, + verbose: bool = True, + system_prompt: str = None, + temperature: float = 0, + timeout: int = 60, + api_base: str = None, + max_tokens: int = 1024, + img_size: int = 512, + img_detail: str = 'low', + use_azure: bool = False, + **kwargs): + + self.model = model + self.cur_idx = 0 + self.fail_msg = 'Failed to obtain answer via API. 
' + self.max_tokens = max_tokens + self.temperature = temperature + self.use_azure = use_azure + + if 'step-1v' in model: + env_key = os.environ.get('STEPAI_API_KEY', '') + if key is None: + key = env_key + elif 'yi-vision' in model: + env_key = os.environ.get('YI_API_KEY', '') + if key is None: + key = env_key + else: + if use_azure: + env_key = os.environ.get('AZURE_OPENAI_API_KEY', None) + assert env_key is not None, 'Please set the environment variable AZURE_OPENAI_API_KEY. ' + + if key is None: + key = env_key + assert isinstance(key, str), ( + 'Please set the environment variable AZURE_OPENAI_API_KEY to your openai key. ' + ) + else: + env_key = os.environ.get('OPENAI_API_KEY', '') + if key is None: + key = env_key + # assert isinstance(key, str) and key.startswith('sk-'), ( + # f'Illegal openai_key {key}. ' + # 'Please set the environment variable OPENAI_API_KEY to your openai key. ' + # ) + + self.key = key + assert img_size > 0 or img_size == -1 + self.img_size = img_size + assert img_detail in ['high', 'low'] + self.img_detail = img_detail + self.timeout = timeout + + super().__init__(wait=wait, retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs) + + if use_azure: + api_base_template = ( + '{endpoint}openai/deployments/{deployment_name}/chat/completions?api-version={api_version}' + ) + endpoint = os.getenv('AZURE_OPENAI_ENDPOINT', None) + assert endpoint is not None, 'Please set the environment variable AZURE_OPENAI_ENDPOINT. ' + deployment_name = os.getenv('AZURE_OPENAI_DEPLOYMENT_NAME', None) + assert deployment_name is not None, 'Please set the environment variable AZURE_OPENAI_DEPLOYMENT_NAME. ' + api_version = os.getenv('OPENAI_API_VERSION', None) + assert api_version is not None, 'Please set the environment variable OPENAI_API_VERSION. ' + + self.api_base = api_base_template.format( + endpoint=os.getenv('AZURE_OPENAI_ENDPOINT'), + deployment_name=os.getenv('AZURE_OPENAI_DEPLOYMENT_NAME'), + api_version=os.getenv('OPENAI_API_VERSION') + ) + else: + if api_base is None: + if 'OPENAI_API_BASE' in os.environ and os.environ['OPENAI_API_BASE'] != '': + self.logger.info('Environment variable OPENAI_API_BASE is set. Will use it as api_base. ') + api_base = os.environ['OPENAI_API_BASE'] + else: + api_base = 'OFFICIAL' + + assert api_base is not None + + if api_base in APIBASES: + self.api_base = APIBASES[api_base] + elif api_base.startswith('http'): + self.api_base = api_base + else: + self.logger.error('Unknown API Base. ') + sys.exit(-1) + + self.logger.info(f'Using API Base: {self.api_base}; API Key: {self.key}') + print(f'Init finished', flush=True) + + # inputs can be a lvl-2 nested list: [content1, content2, content3, ...] 
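+    # For illustration (hypothetical file names / texts, not taken from the codebase),
+    # a flat single-turn input looks like
+    #   [dict(type='image', value='demo.jpg'), dict(type='text', value='Describe the image.')]
+    # while a role-tagged multi-turn input looks like
+    #   [dict(role='user', content=[dict(type='text', value='Hi')]),
+    #    dict(role='assistant', content=[dict(type='text', value='Hello!')]),
+    #    dict(role='user', content=[dict(type='image', value='demo.jpg'),
+    #                               dict(type='text', value='Describe the image.')])]
+    # and the final turn must carry role='user' (checked in prepare_inputs below).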
+ # content can be a string or a list of image & text + def prepare_itlist(self, inputs): + assert np.all([isinstance(x, dict) for x in inputs]) + has_images = np.sum([x['type'] == 'image' for x in inputs]) + if has_images: + content_list = [] + for msg in inputs: + if msg['type'] == 'text': + content_list.append(dict(type='text', text=msg['value'])) + elif msg['type'] == 'image': + from PIL import Image + img = Image.open(msg['value']) + b64 = encode_image_to_base64(img, target_size=self.img_size) + img_struct = dict(url=f'data:image/jpeg;base64,{b64}', detail=self.img_detail) + content_list.append(dict(type='image_url', image_url=img_struct)) + else: + assert all([x['type'] == 'text' for x in inputs]) + text = '\n'.join([x['value'] for x in inputs]) + content_list = [dict(type='text', text=text)] + return content_list + + def prepare_inputs(self, inputs): + input_msgs = [] + # if self.system_prompt is not None: + # input_msgs.append(dict(role='system', content=self.system_prompt)) + assert isinstance(inputs, list) and isinstance(inputs[0], dict) + assert np.all(['type' in x for x in inputs]) or np.all(['role' in x for x in inputs]), inputs + if 'role' in inputs[0]: + assert inputs[-1]['role'] == 'user', inputs[-1] + for item in inputs: + input_msgs.append(dict(role=item['role'], content=self.prepare_itlist(item['content']))) + else: + input_msgs.append(dict(role='user', content=self.prepare_itlist(inputs))) + return input_msgs + + def generate_inner(self, inputs, **kwargs) -> str: + input_msgs = self.prepare_inputs(inputs) + temperature = kwargs.pop('temperature', self.temperature) + max_tokens = kwargs.pop('max_tokens', self.max_tokens) + + context_window = GPT_context_window(self.model) + max_tokens = min(max_tokens, context_window - self.get_token_len(inputs)) + if 0 < max_tokens <= 100: + self.logger.warning( + 'Less than 100 tokens left, ' + 'may exceed the context window with some additional meta symbols. ' + ) + if max_tokens <= 0: + return 0, self.fail_msg + 'Input string longer than context window. ', 'Length Exceeded. 
' + + # Will send request if use Azure, dk how to use openai client for it + if self.use_azure: + headers = {'Content-Type': 'application/json', 'api-key': self.key} + else: + headers = {'Content-Type': 'application/json', 'Authorization': f'Bearer {self.key}'} + payload = dict( + model=self.model, + messages=input_msgs, + max_tokens=max_tokens, + n=1, + temperature=temperature, + **kwargs) + + # # START ORIGIN ## + # response = requests.post( + # self.api_base, + # headers=headers, data=json.dumps(payload), timeout=self.timeout * 1.1) + + # END ORIGIN ## + + # START LLM CENTER ## + chat = ChatClient() + + # exit() + # print(f"map {self.model} to model_id: {map_to_llm_center_code[self.model]}") + # print(f"Request user_prompt: {input_msgs[0]['content'][0]['text']}") + #response = chat.chat_sync_retry(user_prompt=input_msgs[0]['content'][0]['text'], + # model_id=map_to_llm_center_code[self.model], + # max_tokens=max_tokens, + # return_post_resp=True) + if self.system_prompt is not None: + response = chat.chat_sync_retry(user_prompt=input_msgs[0]['content'][0]['text'], + model_id=map_to_llm_center_code[self.model], + max_tokens=max_tokens, + system_prompt=self.system_prompt, + return_post_resp=True) + else: + response = chat.chat_sync_retry( + user_prompt=input_msgs[0]["content"][0]["text"], + model_id=map_to_llm_center_code[self.model], + max_tokens=max_tokens, + return_post_resp=True, + ) + ## END LLM CENTER ## + + + ret_code = response.status_code + ret_code = 0 if (200 <= int(ret_code) < 300) else ret_code + answer = self.fail_msg + try: + resp_struct = json.loads(response.text) + answer = resp_struct['choices'][0]['message']['content'].strip() + # print(f"Answer: {answer}") + except: + try: + answer = json.loads(response.content)['data']['messages'][0]['content'] + except: + pass + pass + return ret_code, answer, response + + def get_image_token_len(self, img_path, detail='low'): + import math + if detail == 'low': + return 85 + + im = Image.open(img_path) + height, width = im.size + if width > 1024 or height > 1024: + if width > height: + height = int(height * 1024 / width) + width = 1024 + else: + width = int(width * 1024 / height) + height = 1024 + + h = math.ceil(height / 512) + w = math.ceil(width / 512) + total = 85 + 170 * h * w + return total + + def get_token_len(self, inputs) -> int: + import tiktoken + try: + enc = tiktoken.encoding_for_model(self.model) + except: + enc = tiktoken.encoding_for_model('gpt-4') + assert isinstance(inputs, list) + tot = 0 + for item in inputs: + if 'role' in item: + tot += self.get_token_len(item['content']) + elif item['type'] == 'text': + tot += len(enc.encode(item['value'])) + elif item['type'] == 'image': + tot += self.get_image_token_len(item['value'], detail=self.img_detail) + return tot + + +class GPT4V(OpenAIWrapper): + + def generate(self, message, dataset=None): + return super(GPT4V, self).generate(message) diff --git a/vlmeval/api/hf_chat_model.py b/vlmeval/api/hf_chat_model.py new file mode 100644 index 0000000000000000000000000000000000000000..255e29ae943ace4d596fc1d0dd22ce0a7e7d03cb --- /dev/null +++ b/vlmeval/api/hf_chat_model.py @@ -0,0 +1,263 @@ +import os +import sys +import os.path as osp +import torch +from ..smp import * + + +def get_gpu_num(model_name): + model_name = model_name.lower() + kws = { + 8: ['65b', '70b'], + 4: ['30b', '33b', '35b', '40b'], + 2: ['13b', '14b', '20b', '8b'], + 1: ['6b', '7b', 'moss'], + } + for k in [8, 4, 2, 1]: + for keyword in kws[k]: + if keyword in model_name: + return k + return 8 + + 
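+# A few worked examples of the keyword heuristic above (the last model name is hypothetical):
+#   get_gpu_num('internlm/internlm-chat-20b')  -> 2   ('20b' keyword)
+#   get_gpu_num('lmsys/vicuna-7b-v1.5')        -> 1   ('7b' keyword)
+#   get_gpu_num('my-custom-model')             -> 8   (no keyword matched, conservative fallback)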
+validated_llms = [ + 'internlm/internlm-chat-7b', 'internlm/internlm-chat-7b-8k', 'internlm/internlm-chat-20b', + 'Qwen/Qwen-7B-Chat', 'Qwen/Qwen-14B-Chat', + 'THUDM/chatglm2-6b', 'THUDM/chatglm2-6b-32k', 'THUDM/chatglm3-6b', 'THUDM/chatglm3-6b-32k', + 'baichuan-inc/Baichuan2-7B-Chat', 'baichuan-inc/Baichuan2-13B-Chat', + 'lmsys/vicuna-7b-v1.5', 'lmsys/vicuna-13b-v1.5', + 'meta-llama/Llama-2-7b-chat-hf', + 'meta-llama/Llama-3.1-8B-Instruct' +] +Auto_model = ['chatglm'] + + +class HFChatModel: + + def _get_context_length(self, model, model_path): + # By default, we use model.config.seq_length + model_path = model_path.lower() + if 'baichuan' in model_path: + context_window = model.config.model_max_length + elif 'internlm' in model_path or 'llama' in model_path: + context_window = model.config.max_position_embeddings + elif 'vicuna' in model_path: + context_window = model.generation_config.max_length + else: + # chatglm & qwen + context_window = model.config.seq_length + return context_window + + def _get_context_length_robust(self, model, model_path): + try: + context_window = self._get_context_length(model, model_path) + return context_window + except Exception as err: + self.logger.critical(f'{type(err)}: {err}') + self.logger.critical( + 'Failed to extract context_window information from config / generation_config. ' + 'Please read the above code and check if the logic works for you model path' + ) + raise NotImplementedError + + def __init__(self, + model_path, + system_prompt: str = None, + **kwargs): + + self.logger = get_logger('HFChatModel') + if 'vicuna' in model_path.lower() or 'llama' in model_path.lower(): + try: + from fastchat.model import get_conversation_template + except Exception as err: + self.logger.critical('Please install fastchat first to use vicuna. ') + raise err + + self.explicit_device = kwargs.pop('device', None) + if self.explicit_device is None: + # If CUDA_VISIBLE_DEVICES is not properly set + if 'CUDA_VISIBLE_DEVICES' not in os.environ or os.environ['CUDA_VISIBLE_DEVICES'] == '0,1,2,3,4,5,6,7': + num_gpu = get_gpu_num(model_path) + gpu_offset = kwargs.pop('gpu_offset', 0) + cuda_visible_devices = ','.join([str(i) for i in range(gpu_offset, gpu_offset + num_gpu)]) + os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices + + from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModel + from transformers.generation import GenerationConfig + + if model_path not in validated_llms: + self.logger.warning(f'{model_path} not in validated LLMs, may have inference troubles. 
') + + self.model_path = model_path + if listinstr(Auto_model, model_path): + LoadModel = AutoModel + else: + LoadModel = AutoModelForCausalLM + assert osp.exists(model_path) or len(model_path.split('/')) == 2 + + device = self.explicit_device if self.explicit_device else 'auto' + + precision = {} + if 'internlm-chat-7b' in model_path: + precision = {'torch_dtype': torch.float16} + elif 'internlm-chat-20b' in model_path: + precision = {'torch_dtype': torch.bfloat16} + + self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) + cuda_devices = os.environ.get('CUDA_VISIBLE_DEVICES', '0') + if ',' in cuda_devices: + device_ids = [int(x) for x in cuda_devices.split(',')] + device_map = {i: i for i in range(len(device_ids))} + else: + device_map = {'': 0} + + if 'llama' in self.model_path.lower(): + from lmdeploy import pipeline, GenerationConfig, TurbomindEngineConfig + print(f"Loading model {model_path} with {num_gpu} GPUs") + backend_config = TurbomindEngineConfig(tp=num_gpu) + self.gen_config = GenerationConfig(max_new_tokens=256) + model = pipeline(model_path, + backend_config=backend_config) + else: + model = LoadModel.from_pretrained(model_path, trust_remote_code=True, device_map='cpu', **precision) + model = model.eval() + + if device != 'cpu': + model = model.to(f'cuda:{device}' if isinstance(device, int) else 'cuda') + try: + model.generation_config = GenerationConfig.from_pretrained( + model_path, trust_remote_code=True, device_map=device) + except Exception as err: + self.logger.warning(f'{type(err)}: {err}') + + self.context_length = self._get_context_length_robust(model=model, model_path=model_path) + + torch.cuda.empty_cache() + self.model = model + self.answer_buffer = 192 + self.system_prompt = system_prompt + for k, v in kwargs.items(): + self.logger.info(f'Following args will be used for generation (If not set specifically), {k}: {v}. 
') + self.kwargs = kwargs + + def generate_str(self, input, **kwargs): + if 'baichuan' in self.model_path.lower(): + messages = [] + messages.append({'role': 'user', 'content': input}) + resp = self.model.chat(self.tokenizer, messages, **kwargs) + elif 'vicuna' in self.model_path.lower(): + from fastchat.model import get_conversation_template + conv = get_conversation_template('vicuna') + conv.append_message(conv.roles[0], input) + conv.append_message(conv.roles[1], None) + prompt = conv.get_prompt() + inputs = self.tokenizer([prompt], return_tensors='pt') + if torch.cuda.is_available(): + for k in inputs: + inputs[k] = inputs[k].cuda() + + params = dict(do_sample=True, temperature=0.7, repetition_penalty=1.0, max_new_tokens=512) + params.update(self.kwargs) + params.update(kwargs) + outputs = self.model.generate(**inputs, **params) + resp = self.tokenizer.decode( + outputs[0][len(inputs['input_ids'][0]):], + skip_special_tokens=True, + spaces_between_special_tokens=False) + elif 'llama' in self.model_path.lower(): + prompt = [{'role': 'system', 'content': self.system_prompt}, {'role': 'user', 'content': input}] + resp = self.model(prompt, gen_config=self.gen_config).text + else: + params = self.kwargs + params.update(kwargs) + resp, _ = self.model.chat(self.tokenizer, input, history=[], **params) + + return resp + + def length_ok(self, inputs): + tot = len(self.tokenizer.encode(self.system_prompt)) if self.system_prompt is not None else 0 + for s in inputs: + tot += len(self.tokenizer.encode(s)) + return tot + self.answer_buffer < self.context_length + + def generate_list(self, full_inputs, offset=0, **kwargs): + assert isinstance(full_inputs, list) + inputs = full_inputs[offset:] + if not self.length_ok(inputs): + return self.chat(full_inputs, offset + 1) + + model_path = self.model_path.lower() + + if sum([x in model_path for x in ['baichuan']]): + input_msgs = [] + if self.system_prompt is not None: + input_msgs.append(dict(role='user', content=self.system_prompt)) + if len(inputs): + assert isinstance(inputs, list) and isinstance(inputs[0], str) + roles = ['user', 'assistant'] if len(inputs) % 2 == 1 else ['assistant', 'user'] + roles = roles * len(inputs) + for role, msg in zip(roles, inputs): + input_msgs.append(dict(role=role, content=msg)) + response = self.model.chat(self.tokenizer, input_msgs) + elif sum([x in model_path for x in ['vicuna']]): + from fastchat.model import get_conversation_template + conv = get_conversation_template('vicuna') + assert isinstance(inputs, list) and isinstance(inputs[0], str) + if len(inputs) % 2 == 1: + if self.system_prompt is not None: + conv.append_message(conv.roles[0], self.system_prompt) + for i in range(len(inputs) // 2): + conv.append_message(conv.roles[0], inputs[2 * i]) + conv.append_message(conv.roles[1], inputs[2 * i + 1]) + else: + assert self.system_prompt is not None + conv.append_message(conv.roles[0], self.system_prompt) + conv.append_message(conv.roles[1], inputs[0]) + for i in range(len(inputs) // 2 - 1): + conv.append_message(conv.roles[0], inputs[2 * i + 1]) + conv.append_message(conv.roles[1], inputs[2 * i + 2]) + conv.append_message(conv.roles[0], inputs[-1]) + conv.append_message(conv.roles[1], None) + prompt = conv.get_prompt() + inputs = self.tokenizer([prompt], return_tensors='pt') + if torch.cuda.is_available(): + for k in inputs: + inputs[k] = inputs[k].cuda() + + params = dict(do_sample=True, temperature=0.7, repetition_penalty=1.0, max_new_tokens=512) + params.update(self.kwargs) + params.update(kwargs) + + outputs = 
self.model.generate(**inputs, **params) + response = self.tokenizer.decode( + outputs[0][len(inputs['input_ids'][0]):], + skip_special_tokens=True, + spaces_between_special_tokens=False) + response = response.lstrip('\n') + else: + # The default option, support internlm, chatglm, qwen + history, msg = [], None + if len(inputs) % 2 == 1: + if self.system_prompt is not None: + history = [(self.system_prompt, '')] + for i in range(len(inputs) // 2): + history.append((inputs[2 * i], inputs[2 * i + 1])) + else: + assert self.system_prompt is not None + history = [(self.system_prompt, inputs[0])] + for i in range(len(inputs) // 2 - 1): + history.append((inputs[2 * i + 1], inputs[2 * i + 2])) + msg = inputs[-1] + + params = self.kwargs + params.update(kwargs) + response, _ = self.model.chat(self.tokenizer, msg, history=history, **params) + + return response, offset + + def generate(self, inputs, **kwargs): + if isinstance(inputs, str): + return self.generate_str(inputs, **kwargs) + elif isinstance(inputs, list): + return self.generate_list(inputs, **kwargs) + diff --git a/vlmeval/api/hunyuan.py b/vlmeval/api/hunyuan.py new file mode 100644 index 0000000000000000000000000000000000000000..095990626e08f676424a70160455f8ce4ef7baa7 --- /dev/null +++ b/vlmeval/api/hunyuan.py @@ -0,0 +1,219 @@ +from vlmeval.smp import * +import os +import sys +from vlmeval.api.base import BaseAPI +import math +from vlmeval.dataset import DATASET_TYPE +from vlmeval.dataset import img_root_map +from io import BytesIO +import pandas as pd +import requests +import json +import base64 +import time + + +class HunyuanWrapper(BaseAPI): + + is_api: bool = True + _apiVersion = '2024-12-31' + _service = 'hunyuan' + + def __init__(self, + model: str = 'hunyuan-standard-vision', + retry: int = 5, + wait: int = 5, + secret_key: str = None, + secret_id: str = None, + verbose: bool = True, + system_prompt: str = None, + temperature: float = 0, + timeout: int = 60, + api_base: str = 'hunyuan.tencentcloudapi.com', + **kwargs): + + self.model = model + self.cur_idx = 0 + self.fail_msg = 'Failed to obtain answer via API. ' + self.temperature = temperature + + warnings.warn('You may need to set the env variable HUNYUAN_SECRET_ID & HUNYUAN_SECRET_KEY to use Hunyuan. ') + + secret_key = os.environ.get('HUNYUAN_SECRET_KEY', secret_key) + assert secret_key is not None, 'Please set the environment variable HUNYUAN_SECRET_KEY. ' + secret_id = os.environ.get('HUNYUAN_SECRET_ID', secret_id) + assert secret_id is not None, 'Please set the environment variable HUNYUAN_SECRET_ID. ' + + self.model = model + self.endpoint = api_base + self.secret_id = secret_id + self.secret_key = secret_key + self.timeout = timeout + + try: + from tencentcloud.common import credential + from tencentcloud.common.profile.client_profile import ClientProfile + from tencentcloud.common.profile.http_profile import HttpProfile + from tencentcloud.hunyuan.v20230901 import hunyuan_client + except ImportError as err: + self.logger.critical('Please install tencentcloud-sdk-python to use Hunyuan API. 
') + raise err + + super().__init__(wait=wait, retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs) + + cred = credential.Credential(self.secret_id, self.secret_key) + httpProfile = HttpProfile(reqTimeout=300) + httpProfile.endpoint = self.endpoint + clientProfile = ClientProfile() + clientProfile.httpProfile = httpProfile + self.client = hunyuan_client.HunyuanClient(cred, '', clientProfile) + self.logger.info( + f'Using Endpoint: {self.endpoint}; API Secret ID: {self.secret_id}; API Secret Key: {self.secret_key}' + ) + + def dump_image(self, line, dataset): + """Dump the image(s) of the input line to the corresponding dataset folder. + + Args: + line (line of pd.DataFrame): The raw input line. + dataset (str): The name of the dataset. + + Returns: + str | list[str]: The paths of the dumped images. + """ + ROOT = LMUDataRoot() + assert isinstance(dataset, str) + + img_root = os.path.join(ROOT, 'images', img_root_map(dataset) if dataset in img_root_map(dataset) else dataset) + os.makedirs(img_root, exist_ok=True) + if 'image' in line: + if isinstance(line['image'], list): + tgt_path = [] + assert 'image_path' in line + for img, im_name in zip(line['image'], line['image_path']): + path = osp.join(img_root, im_name) + if not read_ok(path): + decode_base64_to_image_file(img, path) + tgt_path.append(path) + else: + tgt_path = osp.join(img_root, f"{line['index']}.jpg") + if not read_ok(tgt_path): + decode_base64_to_image_file(line['image'], tgt_path) + tgt_path = [tgt_path] + else: + assert 'image_path' in line + tgt_path = toliststr(line['image_path']) + + return tgt_path + + def use_custom_prompt(self, dataset_name): + if DATASET_TYPE(dataset_name) == 'MCQ': + return True + else: + return False + + def build_prompt(self, line, dataset=None): + assert self.use_custom_prompt(dataset) + assert dataset is None or isinstance(dataset, str) + + tgt_path = self.dump_image(line, dataset) + + question = line['question'] + options = { + cand: line[cand] + for cand in string.ascii_uppercase + if cand in line and not pd.isna(line[cand]) + } + options_prompt = 'Options:\n' + for key, item in options.items(): + options_prompt += f'{key}. {item}\n' + hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None + prompt = '' + if hint is not None: + prompt += f'Hint: {hint}\n' + prompt += f'Question: {question}\n' + if len(options): + prompt += options_prompt + prompt += 'Answer with the option letter from the given choices directly.' + + msgs = [] + if isinstance(tgt_path, list): + msgs.extend([dict(type='image', value=p) for p in tgt_path]) + else: + msgs = [dict(type='image', value=tgt_path)] + msgs.append(dict(type='text', value=prompt)) + return msgs + + # inputs can be a lvl-2 nested list: [content1, content2, content3, ...] 
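+    # The accepted input format matches the other API wrappers (a flat list of type/value
+    # dicts, or role-tagged turns); prepare_itlist converts it to Tencent-style capitalized
+    # keys. A hypothetical round-trip:
+    #   [dict(type='text', value='What is in the image?'), dict(type='image', value='demo.jpg')]
+    #   -> [dict(Type='text', Text='What is in the image?'),
+    #       dict(Type='image_url', ImageUrl=dict(Url='data:image/jpeg;base64,<...>'))]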
+ # content can be a string or a list of image & text + def prepare_itlist(self, inputs): + assert np.all([isinstance(x, dict) for x in inputs]) + has_images = np.sum([x['type'] == 'image' for x in inputs]) + if has_images: + content_list = [] + for msg in inputs: + if msg['type'] == 'text': + content_list.append(dict(Type='text', Text=msg['value'])) + elif msg['type'] == 'image': + from PIL import Image + img = Image.open(msg['value']) + b64 = encode_image_to_base64(img) + img_struct = dict(Url=f'data:image/jpeg;base64,{b64}') + content_list.append(dict(Type='image_url', ImageUrl=img_struct)) + else: + assert all([x['type'] == 'text' for x in inputs]) + text = '\n'.join([x['value'] for x in inputs]) + content_list = [dict(Type='text', Text=text)] + return content_list + + def prepare_inputs(self, inputs): + input_msgs = [] + if self.system_prompt is not None: + input_msgs.append(dict(Role='system', Content=self.system_prompt)) + assert isinstance(inputs, list) and isinstance(inputs[0], dict) + assert np.all(['type' in x for x in inputs]) or np.all(['role' in x for x in inputs]), inputs + if 'role' in inputs[0]: + assert inputs[-1]['role'] == 'user', inputs[-1] + for item in inputs: + input_msgs.append(dict(Role=item['role'], Contents=self.prepare_itlist(item['content']))) + else: + input_msgs.append(dict(Role='user', Contents=self.prepare_itlist(inputs))) + return input_msgs + + def generate_inner(self, inputs, **kwargs) -> str: + from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException + from tencentcloud.hunyuan.v20230901 import models + + input_msgs = self.prepare_inputs(inputs) + temperature = kwargs.pop('temperature', self.temperature) + + payload = dict( + Model=self.model, + Messages=input_msgs, + Temperature=temperature, + TopK=1, + **kwargs) + + try: + req = models.ChatCompletionsRequest() + req.from_json_string(json.dumps(payload)) + resp = self.client.ChatCompletions(req) + resp = json.loads(resp.to_json_string()) + answer = resp['Choices'][0]['Message']['Content'] + return 0, answer, resp + except TencentCloudSDKException as e: + self.logger.error(f'Got error code: {e.get_code()}') + if e.get_code() == 'ClientNetworkError': + return -1, self.fail_msg + e.get_code(), None + elif e.get_code() in ['InternalError', 'ServerNetworkError']: + return -1, self.fail_msg + e.get_code(), None + elif e.get_code() in ['LimitExceeded']: + return -1, self.fail_msg + e.get_code(), None + else: + return -1, self.fail_msg + str(e), None + + +class HunyuanVision(HunyuanWrapper): + + def generate(self, message, dataset=None): + return super(HunyuanVision, self).generate(message) diff --git a/vlmeval/api/jt_vl_chat.py b/vlmeval/api/jt_vl_chat.py new file mode 100644 index 0000000000000000000000000000000000000000..dc0dfba5b0ebc5cbf5dcfa52f82442330840ccea --- /dev/null +++ b/vlmeval/api/jt_vl_chat.py @@ -0,0 +1,239 @@ +import pandas as pd +import requests +import json +import os +import base64 +from vlmeval.smp import * +from vlmeval.api.base import BaseAPI +from vlmeval.dataset import DATASET_TYPE +from vlmeval.dataset import img_root_map + + +API_ENDPOINT = 'https://jiutian.10086.cn/kunlun/ingress/api/h3t-eeceff/92390745235a40a484d850be19e1f8b4/ai-5d7ae47ec93f4280953273c4001aafee/service-7544ea5ee3e841ad9d01e7af44acef7c/v1/chat/completions' # noqa: E501 +APP_CODE = 
'eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJzdWIiOiI5ZGQwNmQ2ZjU4YTU0ZGY0OGEzNjRhMjQyNGMwODEyNSIsImlzcyI6ImFwaS1hdXRoLWtleSIsImV4cCI6NDg4MjkwNDA3OX0.k5t_T-955xWMndzBbx4WQQNAgm5DpMos9mHm7vkFipQ3yebCFMfyufpSxORSfEVpBaDS3Nly0dd8ygQYGnDgIQcC72vQ1xtkjCP49LNcqlceoET4rGc1zwRi76XLPSGFES4GcwvEmr7Ilth7XtqZNxcDF_Z7HyHyf1-zF0JIQETYSoxenqLU-gNteNfqRUnlyCgaKh03DscAbYvtoMUxEaFa2ZqyRSwekdHI_SPKCq9aC9G19yDPHTjeiwl1ubtyC5uMy5pERn_ClRsZS3Wyb-GmD5QQsFofrWvCiU_fVJuUiez39pYZvEP8awH0R9B7SkpQ4XOzj3fdytTPYy3g6g' # noqa: E501 + + +class JTVLChatWrapper(BaseAPI): + is_api: bool = True + INTERLEAVE = False + + def __init__(self, + model: str = 'jt-vl-chat', + retry: int = 5, + wait: int = 5, + api_base: str = API_ENDPOINT, + key: str = APP_CODE, + verbose: bool = True, + system_prompt: str = None, + temperature: float = 0.7, + max_tokens: int = 2048, + proxy: str = None, + **kwargs): + self.model = model + + self.temperature = temperature + self.max_tokens = max_tokens + self.api_base = api_base + + if key is None: + key = os.environ.get('JTVLChat_API_KEY', None) + assert key is not None, ( + 'Please set the API Key (also called app_code, obtain it here: https://github.com/jiutiancv/JT-VL-Chat)' + ) + + self.key = key + super().__init__(wait=wait, retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs) + + def dump_image(self, line, dataset): + """Dump the image(s) of the input line to the corresponding dataset folder. + + Args: + line (line of pd.DataFrame): The raw input line. + dataset (str): The name of the dataset. + + Returns: + str | list[str]: The paths of the dumped images. + """ + ROOT = LMUDataRoot() + assert isinstance(dataset, str) + + img_root = os.path.join(ROOT, 'images', img_root_map(dataset) if dataset in img_root_map(dataset) else dataset) + os.makedirs(img_root, exist_ok=True) + if 'image' in line: + if isinstance(line['image'], list): + tgt_path = [] + assert 'image_path' in line + for img, im_name in zip(line['image'], line['image_path']): + path = osp.join(img_root, im_name) + if not read_ok(path): + decode_base64_to_image_file(img, path) + tgt_path.append(path) + else: + tgt_path = osp.join(img_root, f"{line['index']}.jpg") + if not read_ok(tgt_path): + decode_base64_to_image_file(line['image'], tgt_path) + tgt_path = [tgt_path] + else: + assert 'image_path' in line + tgt_path = toliststr(line['image_path']) + + return tgt_path + + def use_custom_prompt(self, dataset): + assert dataset is not None + if listinstr(['MMMU_DEV_VAL','MMMU_TEST'], dataset): + return False + else: + return True + + def build_multi_choice_prompt(self, line, dataset=None): + question = line['question'] + hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None + if hint is not None: + question = hint + '\n' + question + + options = { + cand: line[cand] + for cand in string.ascii_uppercase + if cand in line and not pd.isna(line[cand]) + } + for key, item in options.items(): + question += f'\n{key}. {item}' + prompt = question + + if len(options): + prompt += '\n请直接回答选项字母。' if cn_string( + prompt) else "\nAnswer with the option's letter from the given choices directly." + else: + prompt += '\n请直接回答问题。' if cn_string(prompt) else '\nAnswer the question directly.' 
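+        # For reference, an English MCQ line is assembled into something like the
+        # following (hypothetical question and options):
+        #   Which animal is shown in the picture?
+        #   A. cat
+        #   B. dog
+        #   Answer with the option's letter from the given choices directly.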
+ + return prompt + + def build_prompt(self, line, dataset=None): + assert self.use_custom_prompt(dataset) + assert dataset is None or isinstance(dataset, str) + + tgt_path = self.dump_image(line, dataset) + + if dataset is not None and listinstr(['MME'], dataset): + question = line['question'] + prompt = question + ' Answer the question using a single word or phrase.' + elif dataset is not None and listinstr(['HallusionBench'], dataset): + question = line['question'] + prompt = question + ' Please answer yes or no. Answer the question using a single word or phrase.' + elif dataset is not None and DATASET_TYPE(dataset) == 'MCQ': + prompt = self.build_multi_choice_prompt(line, dataset) + elif dataset is not None and DATASET_TYPE(dataset) == 'VQA': + if listinstr(['MathVista', 'MathVision'], dataset): + prompt = line['question'] + elif listinstr(['LLaVABench'], dataset): + question = line['question'] + prompt = question + '\nAnswer this question in detail.' + elif listinstr(['MMVet'], dataset): + prompt = line['question'] + else: + question = line['question'] + prompt = question + '\nAnswer the question using a single word or phrase.' + else: + prompt = line['question'] + message = [dict(type='text', value=prompt)] + message.extend([dict(type='image', value=s) for s in tgt_path]) + return message + + def message_to_promptimg(self, message, dataset=None): + assert not self.INTERLEAVE + model_name = self.__class__.__name__ + import warnings + warnings.warn( + f'Model {model_name} does not support interleaved input. ' + 'Will use the first image and aggregated texts as prompt. ') + num_images = len([x for x in message if x['type'] == 'image']) + if num_images == 0: + prompt = '\n'.join([x['value'] for x in message if x['type'] == 'text']) + image = None + else: + prompt = '\n'.join([x['value'] for x in message if x['type'] == 'text']) + if dataset == 'BLINK': + image = concat_images_vlmeval( + [x['value'] for x in message if x['type'] == 'image'], + target_size=512) + else: + image = [x['value'] for x in message if x['type'] == 'image'][0] + return prompt, image + + def get_send_data(self,prompt, image_path, temperature, max_tokens): + image = '' + with open(image_path, 'rb') as f: + image = str(base64.b64encode(f.read()), 'utf-8') + send_data = { + "messages": [ + { + "role": "user", + "content": prompt + } + ], + "image_base64": image, + "max_tokens": max_tokens, + "temperature": temperature + } + return send_data + + def get_send_data_no_image(self,prompt, temperature, max_tokens): + send_data = { + "messages": [ + { + "role": "user", + "content": prompt + } + ], + "max_tokens": max_tokens, + "temperature": temperature + } + return send_data + + def generate_inner(self, inputs, **kwargs) -> str: + assert isinstance(inputs, str) or isinstance(inputs, list) + inputs = [inputs] if isinstance(inputs, str) else inputs + dataset = kwargs.get('dataset', None) + prompt, image_path = self.message_to_promptimg(message=inputs, dataset=dataset) + # print("prompt:",prompt) + if image_path: + send_data = self.get_send_data( + prompt=prompt, + image_path=image_path, + temperature=self.temperature, + max_tokens=self.max_tokens) + else: + send_data = self.get_send_data_no_image( + prompt=prompt, + temperature=self.temperature, + max_tokens=self.max_tokens) + + json_data = json.dumps(send_data) + + header_dict = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + self.key} + + r = requests.post(self.api_base, headers=header_dict, data=json_data, timeout=3000) + try: + assert r.status_code == 200 
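+            # The fields read below assume an OpenAI-style payload, roughly
+            # {'choices': [{'message': {'content': '<answer text>'}}]}
+            # (shape inferred from the parsing code, not from API documentation).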
+ r_json = r.json() + output = r_json['choices'][0]['message']['content'] + if self.verbose: + self.logger.info(f'inputs: {inputs}\nanswer: {output}') + + return 0,output,'Succeeded! ' + + except: + error_msg = f'Error! code {r.status_code} content: {r.content}' + error_con = r.content.decode('utf-8') + if self.verbose: + self.logger.error(error_msg) + self.logger.error(error_con) + self.logger.error(f'The input messages are {inputs}.') + return -1,error_msg,'' + + +class JTVLChatAPI(JTVLChatWrapper): + + def generate(self, message, dataset=None): + return super(JTVLChatAPI, self).generate(message, dataset=dataset) diff --git a/vlmeval/api/llm_center_gpt.py b/vlmeval/api/llm_center_gpt.py new file mode 100644 index 0000000000000000000000000000000000000000..73ee9b594b18b485dfa67ec3a5045c6c223bc7d3 --- /dev/null +++ b/vlmeval/api/llm_center_gpt.py @@ -0,0 +1,296 @@ +import os +import sys +import time +import json +import requests +import traceback + +import openai + + +# openai.api_base = "https://api.zhiyungpt.com/v1" +# openai.api_key = 'sk-J3oUW0OZtUSW0NNm5a7268Ae9f2b437b933cA702A0714436' + +# openai.api_base = 'https://yeysai.com/v1' +# openai.api_key = 'sk-PMuFYQfYEay4oBglBeA8EcB121A9414dBf90Ab4591918c7d' + +# openai.api_base='https://cn2us02.opapi.win/v1' +# openai.api_key = 'sk-xIXHIDN8508485013120T3BLbKFJa099F1D19e3641a2bC33' + +class Chat: + def __init__(self, model="", timeout_sec=20, use_mianbi=True, use_hk=False): + self.model = model + self.timeout = timeout_sec + self.use_mianbi = use_mianbi + self.use_hk = use_hk + + def chat_completion(self, messages, temperature=0.2, top_p=1, max_tokens=512, + presence_penalty=0, frequency_penalty=0): + + if self.use_mianbi: + if "gpt-4" in self.model: + response = requests.post("http://120.92.10.46:8080/chat", json={ + "messages": messages, + "temperature": temperature, + "top_p": top_p, + "max_tokens": max_tokens, + "presence_penalty": presence_penalty, + "frequency_penalty": frequency_penalty + }, timeout=self.timeout).json() + + else: + response = requests.post("http://47.254.22.102:8989/chat", json={ + "model": self.model, + "messages": messages, + "temperature": temperature, + "top_p": top_p, + "max_tokens": max_tokens, + "presence_penalty": 0, + "frequency_penalty": 0 + }, timeout=self.timeout).json() + elif self.use_hk: + response = requests.post('https://api.openai-hk.com/v1/chat/completions', + headers={ + "Content-Type": "application/json", + "Authorization": "Bearer hk-sxx1clga8acagad5xfyh20lxzwmkm1gjs9myym13icwbcv5e" + }, + data=json.dumps({ + "max_tokens": max_tokens, + "model": self.model, + "temperature": temperature, + "top_p": top_p, + "presence_penalty": 0, + "messages": messages + }).encode('utf-8') + ).json() + + else: + response = openai.ChatCompletion.create( + model=self.model, + messages=messages, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + presence_penalty=0, + frequency_penalty=0, + timeout=20, + ) + + return response + + +def get_eval(chat, content, + chat_gpt_system='You are a helpful and precise assistant for checking the quality of the answer.', + max_tokens=256, + fail_limit=100, + temperature=0.2, + top_p=1.0, + omit_version=False): + fail_cnt = 0 + while True: + try: + resp = chat.chat_completion( + messages=[ + {"role": "system", "content": chat_gpt_system}, + {"role": "user", "content": content} + ], + temperature=temperature, + max_tokens=max_tokens, + top_p=top_p, + ) + try: + if resp['model'] != chat.model and not omit_version: + real_model = resp['model'] + print( + f'Except 
{chat.model}, but got message from {real_model}', flush=True) + continue + rtn = resp["choices"][0]["message"]["content"] + # time.sleep(5) + return rtn + except: + print(f'Response: {resp}') + except Exception as e: + print(e) + fail_cnt += 1 + if fail_cnt == fail_limit: + return f'-1\n' + time.sleep(10 + fail_cnt) + + +import os +import sys +import time +import glob +import pathlib +import json +import base64 +import random +import requests +# import tiktoken +import jsonlines +import traceback + +from tqdm import tqdm +from time import sleep +# from openai import OpenAI +from multiprocessing import Pool + +import pandas as pd +import concurrent.futures + +# app_code = 'gpt_table_construction' +# user_code = 'HA3btj-beZGdCXlySMeqejIfCs0yPWZpQv-A9BHmD5Y' +# user_code = '4WImjNj5EngUs7w6RUksodBnJoxmex8WnXFLT2jGsx8' not use + +# app_code = 'vlm_ocr_sft_cn' +# user_code = 'gH9Jc_6KeeRsaYFuZXOOLqano0j8wWudwAYdSIlIePA' + +# app_code = 'vlm_video_sft' +# user_code = 'gH9Jc_6KeeRsaYFuZXOOLqano0j8wWudwAYdSIlIePA' + +### temp +# app_code = 'general_qa' +# user_code = 'LGxjh5s2gCDUjL7BqgDo-K_4Sv9wiGV2VM1bzDCcg9s' +app_code = 'general_qa' +user_token = 'KAtx6H4GK9LQPMx4zHA8hGMFf320EBmltlnTjrb1KCA' + +# user_code = 'avWbeA7o_Q5qARYJp0eSkqtxl3rpoBUV2v3EGQulZLE' + + +class ChatClient: + def __init__(self, app_code=app_code, user_token=user_token, app_token=None): + import os + import requests + + + self.app_code = app_code + self.user_token = user_token + + if app_token is not None: + self.app_token = app_token + else: + #self.app_token = self.get_app_token(app_code, user_token) + self.app_token = self.get_app_token() + + def get_app_token(self): + url = f"https://llm-center.ali.modelbest.cn/llm/client/token/access_token?appCode={self.app_code}&userToken={self.user_token}&expTime=3600" + payload={} + headers = { + 'User-Agent': 'Apifox/1.0.0 (https://apifox.com)' + } + response = requests.request("GET", url, headers=headers, data=payload, proxies={"http": None, "https": None}) + assert response.status_code == 200 + js= json.loads(response.text) + assert js['code']== 0 + return js['data'] + + # def get_app_token(self, app_code, user_token): + + # headers = {'User-Agent': 'Apifox/1.0.0 (https://apifox.com)'} + # res = requests.get( + # f'https://llm-center.ali.modelbest.cn/llm/client/token/access_token?appCode={app_code}&userToken={user_token}&expTime=3600', headers=headers) + # assert res.status_code == 200 + # js = json.loads(res.content) + # assert js['code'] == 0 + # return js['data'] + + def create_conversation(self, title='ocr_sft', user_id='tc'): + url = 'https://llm-center.ali.modelbest.cn/llm/client/conv/createConv' + headers = { + 'app-code': self.app_code, + 'app-token': self.app_token, + 'User-Agent': 'Apifox/1.0.0 (https://apifox.com)', + 'Content-Type': 'application/json' + } + data = {'title': title, 'userId': user_id, 'type': 'conv'} + res = requests.request("POST", url, json=data, headers=headers) + assert res.status_code == 200, f"status code: {res.status_code}" + js = json.loads(res.content) + assert js['code'] == 0 + return js['data'] + + def chat_sync(self, system_prompt='You are a helpful assistant.', user_prompt='', base64_image='', conv_id=None, model_id=36, max_tokens=4096, temperature=0.1, return_post_resp=False): + # print("In system prompt:", system_prompt) + # print("In user prompt:", user_prompt) + # print("model id:", model_id, flush=True) + + url = 'https://llm-center.ali.modelbest.cn/llm/client/conv/accessLargeModel/sync' + headers = { + 'app-code': self.app_code, + 
'app-token': self.app_token, + 'User-Agent': 'Apifox/1.0.0 (https://apifox.com)', + 'Content-Type': 'application/json' + } + data = { + 'userSafe': 0, # disable user safe + 'aiSafe': 0, + 'modelId': model_id, # 15:GPT-4; 36: gpt4 1106 preview; 39; gpt4 vision preview; 32: gpt-3.5-turbo-1106 + 'sysPrompt': system_prompt, + 'generateType': "NORMAL", + 'chatMessage': [ + { + "msgId": "", + "role": "USER", # USER / AI + "contents": [ + { + "type": "TEXT", + "pairs": user_prompt + }, + { + "type": "IMAGE", + "pairs": f"data:image/jpg;base64,{base64_image}", + } + ], + "parentMsgId": "string", + } + ], + "modelParamConfig": { + "maxTokens": max_tokens, + "temperature": temperature, + } + } + + # drop empty image content, otherwise causing js['code'] == 0 + if not base64_image: + data['chatMessage'][0]['contents'].pop(1) + + res = requests.request("POST", url, json=data, headers=headers) + + if return_post_resp: + return res + + assert res.status_code == 200, f"res: {res}, status code: {res.status_code}\n【{user_prompt}】" + js = json.loads(res.content) + assert js['code'] == 0, f'{str(js)}\n【{user_prompt}】' + return js['data']['messages'][0]['content'], js + + def chat_sync_retry(self, system_prompt='You are a helpful assistant.', user_prompt='', base64_image='', conv_id=None, max_retry=3, model_id=36, max_tokens=4096, temperature=0.1, return_post_resp=False): + for i in range(max_retry): + try: + return self.chat_sync(system_prompt, user_prompt, base64_image, conv_id, model_id=model_id, max_tokens=max_tokens, temperature=temperature, return_post_resp=return_post_resp) + except Exception as err: + traceback.print_exc() + print(err) + time.sleep(3) + self.app_token = self.get_app_token() + return None + + +# if __name__ == '__main__': +# # get user_token from environment variable +# # user_token = os.environ.get('USER_TOKEN') +# # user_token = sys.argv[1] + +# chat = ChatClient() +# time_start = time.time() +# res = chat.chat_sync_retry(user_prompt='你是数学老师', model_id=36, max_tokens=10) +# time_end = time.time() +# print(res, time_end - time_start) + + # chat = Chat(model="gpt-4-1106-preview") + # result = chat.chat_completion( + # messages=[ + # {"role": "user", "content": "what is your name?"} + # ], + # ) + # print(result) diff --git a/vlmeval/api/lmdeploy.py b/vlmeval/api/lmdeploy.py new file mode 100644 index 0000000000000000000000000000000000000000..f70166da699e3139a051f9a73b70ff0075c97841 --- /dev/null +++ b/vlmeval/api/lmdeploy.py @@ -0,0 +1,314 @@ +# from http import HTTPStatus +import os +import requests +from ..dataset import DATASET_TYPE, DATASET_MODALITY +from vlmeval.api.base import BaseAPI +from vlmeval.smp import * + + +class InternVL2_PromptUtil: + + def __init__(self, use_mpo_prompt=False): + self.use_mpo_prompt = use_mpo_prompt + + def dump_image(self, line, dataset): + return self.dump_image_func(line) + + def use_custom_prompt(self, dataset): + assert dataset is not None + assert DATASET_MODALITY(dataset) != 'VIDEO', 'not supported' + if listinstr(['MMDU', 'MME-RealWorld', 'MME-RealWorld-CN'], dataset): + # For Multi-Turn we don't have custom prompt + return False + if DATASET_MODALITY(dataset) == 'VIDEO': + # For Video benchmarks we don't have custom prompt at here + return False + else: + return True + + def build_prompt(self, line, dataset=None): + assert self.use_custom_prompt(dataset) + assert dataset is None or isinstance(dataset, str) + from ..vlm.internvl.utils import (build_multi_choice_prompt, + build_mcq_cot_prompt, + build_qa_cot_prompt, + build_mpo_prompt, + 
reorganize_prompt) + + tgt_path = self.dump_image(line, dataset) + max_num = self.get_max_num(dataset) + if dataset is not None and DATASET_TYPE(dataset) == 'Y/N': + question = line['question'] + if listinstr(['MME'], dataset): + prompt = question + ' Answer the question using a single word or phrase.' + elif listinstr(['HallusionBench', 'AMBER'], dataset): + prompt = question + ' Please answer yes or no. Answer the question using a single word or phrase.' + else: + prompt = question + elif dataset is not None and DATASET_TYPE(dataset) == 'MCQ': + prompt = build_multi_choice_prompt(line, dataset) + if os.getenv('USE_COT') == '1': + prompt = build_mcq_cot_prompt(line, prompt) + elif dataset is not None and DATASET_TYPE(dataset) == 'VQA': + question = line['question'] + if listinstr(['LLaVABench', 'WildVision'], dataset): + prompt = question + '\nAnswer this question in detail.' + elif listinstr(['OCRVQA', 'TextVQA', 'ChartQA', 'DocVQA', 'InfoVQA', 'OCRBench', + 'DUDE', 'SLIDEVQA', 'GQA', 'MMLongBench_DOC'], dataset): + prompt = question + '\nAnswer the question using a single word or phrase.' + elif listinstr(['MathVista', 'MathVision', 'VCR', 'MTVQA', 'MMVet', 'MathVerse', + 'MMDU', 'CRPE', 'MIA-Bench', 'MM-Math', 'DynaMath', 'QSpatial', 'WeMath', 'LogicVista'], dataset): + prompt = question + if os.getenv('USE_COT') == '1': + prompt = build_qa_cot_prompt(line, prompt) + else: + prompt = question + '\nAnswer the question using a single word or phrase.' + else: + # VQA_ex_prompt: OlympiadBench, VizWiz + prompt = line['question'] + if os.getenv('USE_COT') == '1': + prompt = build_qa_cot_prompt(line, prompt) + + message = [dict(type='text', value=prompt)] + image_num = len(tgt_path) + max_num = max(1, min(max_num, 64 // image_num)) + # TODO:support upscale_flag + message.extend([dict(type='image', value=s, max_dynamic_patch=max_num) for s in tgt_path]) + + if self.use_mpo_prompt: + message = build_mpo_prompt(message, line, dataset) + + # reorganize_prompt + prompt = reorganize_prompt(message, image_num, dataset=dataset) + prompt.replace('', '') + message[0] = dict(type='text', value=prompt) + return message + + def get_max_num(self, dataset): + assert dataset is not None + res_1_datasets = ['MMBench-Video', 'Video-MME', 'MVBench', 'Video', 'WorldSense'] + res_12_datasets = ['ChartQA_TEST', 'MMMU_DEV_VAL', 'MMMU_TEST', 'MME-RealWorld', + 'VCR_EN', 'VCR_ZH', 'OCRVQA'] + res_18_datasets = ['DocVQA_VAL', 'DocVQA_TEST', 'DUDE', 'MMLongBench_DOC', 'SLIDEVQA'] + res_24_datasets = ['InfoVQA_VAL', 'InfoVQA_TEST', 'OCRBench', 'HRBench4K', 'HRBench8K'] + if listinstr(res_1_datasets, dataset): + return 1 + elif listinstr(res_12_datasets, dataset): + return 12 + elif listinstr(res_18_datasets, dataset): + return 18 + elif listinstr(res_24_datasets, dataset): + return 24 + else: + return 6 + + +class CogVLM2_PromptUtil: + + def dump_image(self, line, dataset): + return self.dump_image_func(line) + + def use_custom_prompt(self, dataset): + assert dataset is not None + if DATASET_TYPE(dataset) in 'MCQ': + return True + return False + + def build_prompt(self, line, dataset=None): + assert dataset is None or isinstance(dataset, str) + assert self.use_custom_prompt(dataset) + tgt_path = self.dump_image(line, dataset) + + if dataset is not None and DATASET_TYPE(dataset) == 'MCQ': + question = line['question'] + hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None + if hint is not None: + question = hint + '\n' + question + + option_candidate = string.ascii_uppercase + options = { + 
cand: line[cand] + for cand in option_candidate + if cand in line and not pd.isna(line[cand]) + } + for key, item in options.items(): + question += f'\n{key}. {item}' + prompt = question + + if not cn_string(prompt): + prompt = prompt + '\n' + "Answer with the option's letter from the given choices directly." + else: + prompt = prompt + '\n' + '请直接回答选项字母。' + else: + prompt = line['question'] + message = [dict(type='text', value=prompt)] + message.extend([dict(type='image', value=p) for p in tgt_path]) + return message + + +class LMDeployWrapper(BaseAPI): + + is_api: bool = True + + custom_prompt: str = None + prompt_map = { + 'cogvlm2': CogVLM2_PromptUtil(), + 'internvl2': InternVL2_PromptUtil(), + 'internvl2-mpo-cot': InternVL2_PromptUtil(use_mpo_prompt=True), + } + + def __init__(self, + retry: int = 5, + wait: int = 5, + key: str = 'sk-123456', + verbose: bool = True, + temperature: float = 0.0, + timeout: int = 60, + api_base: str = None, + system_prompt: str = None, + max_tokens: int = 1024, + **kwargs): + self.fail_msg = 'Failed to obtain answer via API. ' + self.max_tokens = max_tokens + self.timeout = timeout + + key = os.environ.get('LMDEPLOY_API_KEY', key) + api_base = os.environ.get('LMDEPLOY_API_BASE', api_base) + assert key is not None, 'Please set the environment variable LMDEPLOY_API_KEY.' + assert api_base is not None, 'Please set the environment variable LMDEPLOY_API_BASE.' + self.key = key + self.api_base = api_base + super().__init__(wait=wait, retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs) + + model_url = ''.join([api_base.split('v1')[0], 'v1/models']) + resp = requests.get(model_url) + self.model = resp.json()['data'][0]['id'] + self.logger.info(f'lmdeploy evaluate model: {self.model}') + self.set_prompt_pattern(self.model) + if hasattr(self, 'custom_prompt'): + self.logger.info(f'using custom prompt {self.custom_prompt}') + self.temperature = temperature + self.logger.info(f'Init temperature: {self.temperature}') + + def set_dump_image(self, dump_image_func): + if self.custom_prompt in self.prompt_map: + self.prompt_map[self.custom_prompt].dump_image_func = dump_image_func + self.dump_image_func = dump_image_func + + def use_custom_prompt(self, dataset): + if self.custom_prompt in self.prompt_map: + return self.prompt_map[self.custom_prompt].use_custom_prompt(dataset) + return False + + def build_prompt(self, line, dataset=None): + if self.custom_prompt in self.prompt_map: + return self.prompt_map[self.custom_prompt].build_prompt(line, dataset) + raise NotImplementedError + + def set_prompt_pattern(self, model_name): + if 'Phi-3.5-Vision'.lower() in model_name.lower(): + self.max_tokens = 1000 + self.temperature = 0.0 + if 'cogvlm2-llama3-chat-19B'.lower() in model_name.lower(): + self.max_tokens = 2048 + self.temperature = 0.0 + self.custom_prompt = 'cogvlm2' + if 'InternVL2'.lower() in model_name.lower(): + self.max_tokens = 1024 + self.temperature = 0.0 + if 'mpo' in model_name.lower(): + self.max_tokens = 4096 + self.logger.info('Use custom prompt internvl2-mpo-cot') + self.custom_prompt = 'internvl2-mpo-cot' + else: + self.logger.info('Use custom prompt internvl2') + self.custom_prompt = 'internvl2' + if 'internvl2-8b-mpo-cot'.lower() in model_name.lower(): + self.use_mpo_prompt = True + self.max_tokens = 1024 + self.temperature = 0.0 + self.logger.info('Use custom prompt internvl2-mpo-cot') + self.custom_prompt = 'internvl2-mpo-cot' + if 'qvq'.lower() in model_name.lower(): + self.max_tokens = 4096 + self.temperature = 0.0 + 
self.logger.info('QVQ model detected, do not use custom prompt') + + def prepare_itlist(self, inputs): + assert np.all([isinstance(x, dict) for x in inputs]) + has_images = np.sum([x['type'] == 'image' for x in inputs]) + if has_images: + content_list = [] + for msg in inputs: + if msg['type'] == 'text': + content_list.append(dict(type='text', text=msg['value'])) + elif msg['type'] == 'image': + from PIL import Image + img = Image.open(msg['value']) + b64 = encode_image_to_base64(img) + extra_args = msg.copy() + extra_args.pop('type') + extra_args.pop('value') + img_struct = dict(url=f'data:image/jpeg;base64,{b64}', **extra_args) + content_list.append(dict(type='image_url', image_url=img_struct)) + else: + assert all([x['type'] == 'text' for x in inputs]) + text = '\n'.join([x['value'] for x in inputs]) + content_list = [dict(type='text', text=text)] + return content_list + + def prepare_inputs(self, inputs): + input_msgs = [] + if self.system_prompt is not None: + input_msgs.append(dict(role='system', content=self.system_prompt)) + assert isinstance(inputs, list) and isinstance(inputs[0], dict) + assert np.all(['type' in x for x in inputs]) or np.all(['role' in x for x in inputs]), inputs + if 'role' in inputs[0]: + assert inputs[-1]['role'] == 'user', inputs[-1] + for item in inputs: + input_msgs.append(dict(role=item['role'], content=self.prepare_itlist(item['content']))) + else: + input_msgs.append(dict(role='user', content=self.prepare_itlist(inputs))) + return input_msgs + + def generate_inner(self, inputs, **kwargs) -> str: + input_msgs = self.prepare_inputs(inputs) + + temperature = kwargs.pop('temperature', self.temperature) + self.logger.info(f'Generate temperature: {temperature}') + max_tokens = kwargs.pop('max_tokens', self.max_tokens) + + headers = {'Content-Type': 'application/json', 'Authorization': f'Bearer {self.key}'} + payload = dict( + model=self.model, + messages=input_msgs, + max_tokens=max_tokens, + n=1, + temperature=temperature, + **kwargs) + response = requests.post( + self.api_base, + headers=headers, data=json.dumps(payload), timeout=self.timeout * 1.1) + ret_code = response.status_code + ret_code = 0 if (200 <= int(ret_code) < 300) else ret_code + answer = self.fail_msg + try: + resp_struct = json.loads(response.text) + answer = resp_struct['choices'][0]['message']['content'].strip() + + # for internvl2-8b-mpo-cot + if getattr(self, 'use_mpo_prompt', False): + from ..vlm.internvl.utils import mpo_post_processing + answer = mpo_post_processing(answer, kwargs.get('dataset')) + except: + pass + return ret_code, answer, response + + +class LMDeployAPI(LMDeployWrapper): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def generate(self, message, dataset=None): + return super(LMDeployAPI, self).generate(message, dataset=dataset) diff --git a/vlmeval/api/mug_u.py b/vlmeval/api/mug_u.py new file mode 100644 index 0000000000000000000000000000000000000000..e9ff3136e3598c6a7e2a22cc62d2ce9d4a9db82a --- /dev/null +++ b/vlmeval/api/mug_u.py @@ -0,0 +1,214 @@ +# from http import HTTPStatus +import os +import requests +from ..dataset import DATASET_TYPE, DATASET_MODALITY +from vlmeval.api.base import BaseAPI +from vlmeval.smp import * +class MUGUWrapper(BaseAPI): + + is_api: bool = True + + def __init__(self, + model: str, + retry: int = 5, + wait: int = 5, + key: str = None, + verbose: bool = True, + temperature: float = 0.0, + timeout: int = 60, + api_base: str = None, + system_prompt: str = None, + max_tokens: int = 4096, + use_mpo_prompt: bool = False, + 
**kwargs): + self.fail_msg = 'Failed to obtain answer via API. ' + self.max_tokens = max_tokens + self.timeout = timeout + + api_base = 'https://shopee.sg/api/v1/compassllvm/v1/chat/completions' + assert api_base is not None, 'Please set the environment variable LMDEPLOY_API_BASE.' + self.api_base = api_base + super().__init__(wait=wait, retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs) + + model_url = ''.join([api_base.split('v1')[0], 'v1/models']) + resp = requests.get(model_url) + self.model = model + if hasattr(self, 'custom_prompt'): + self.logger.info(f'using custom prompt {self.custom_prompt}') + self.temperature = temperature + self.logger.info(f'Init temperature: {self.temperature}') + self.use_mpo_prompt = use_mpo_prompt + + self.temperature = 0.0 + + def dump_image(self, line, dataset): + return self.dump_image_func(line) + + def set_dump_image(self, dump_image_func): + self.dump_image_func = dump_image_func + + def use_custom_prompt(self, dataset): + assert dataset is not None + assert DATASET_MODALITY(dataset) != 'VIDEO', 'not supported' + if listinstr(['MMDU', 'MME-RealWorld', 'MME-RealWorld-CN'], dataset): + # For Multi-Turn we don't have custom prompt + return False + if DATASET_MODALITY(dataset) == 'VIDEO': + # For Video benchmarks we don't have custom prompt at here + return False + else: + return True + + def get_max_num(self, dataset): + assert dataset is not None + res_1_datasets = ['MMBench-Video', 'Video-MME', 'MVBench', 'Video', 'WorldSense'] + res_12_datasets = ['ChartQA_TEST', 'MMMU_DEV_VAL', 'MMMU_TEST', 'MME-RealWorld', + 'VCR_EN', 'VCR_ZH', 'OCRVQA'] + res_18_datasets = ['DocVQA_VAL', 'DocVQA_TEST', 'DUDE', 'MMLongBench_DOC', 'SLIDEVQA'] + res_24_datasets = ['InfoVQA_VAL', 'InfoVQA_TEST', 'OCRBench', 'HRBench4K', 'HRBench8K'] + if listinstr(res_1_datasets, dataset): + return 1 + elif listinstr(res_12_datasets, dataset): + return 12 + elif listinstr(res_18_datasets, dataset): + return 18 + elif listinstr(res_24_datasets, dataset): + return 24 + else: + return 6 + + def build_prompt(self, line, dataset=None): + assert self.use_custom_prompt(dataset) + assert dataset is None or isinstance(dataset, str) + from ..vlm.internvl.utils import (build_multi_choice_prompt, + build_mcq_cot_prompt, + build_qa_cot_prompt, + build_mpo_prompt, + reorganize_prompt) + + tgt_path = self.dump_image(line, dataset) + max_num = self.get_max_num(dataset) + if dataset is not None and DATASET_TYPE(dataset) == 'Y/N': + question = line['question'] + if listinstr(['MME'], dataset): + prompt = question + ' Answer the question using a single word or phrase.' + elif listinstr(['HallusionBench', 'AMBER'], dataset): + prompt = question + ' Please answer yes or no. Answer the question using a single word or phrase.' + else: + prompt = question + elif dataset is not None and DATASET_TYPE(dataset) == 'MCQ': + prompt = build_multi_choice_prompt(line, dataset) + if os.getenv('USE_COT') == '1': + prompt = build_mcq_cot_prompt(line, prompt) + elif dataset is not None and DATASET_TYPE(dataset) == 'VQA': + question = line['question'] + if listinstr(['LLaVABench', 'WildVision'], dataset): + prompt = question + '\nAnswer this question in detail.' + elif listinstr(['OCRVQA', 'TextVQA', 'ChartQA', 'DocVQA', 'InfoVQA', 'OCRBench', + 'DUDE', 'SLIDEVQA', 'GQA', 'MMLongBench_DOC'], dataset): + prompt = question + '\nAnswer the question using a single word or phrase.' 
+ elif listinstr(['MathVista', 'MathVision', 'VCR', 'MTVQA', 'MMVet', 'MathVerse', + 'MMDU', 'CRPE', 'MIA-Bench', 'MM-Math', 'DynaMath', 'QSpatial', 'WeMath', 'LogicVista'], dataset): + prompt = question + if os.getenv('USE_COT') == '1': + prompt = build_qa_cot_prompt(line, prompt) + else: + prompt = question + '\nAnswer the question using a single word or phrase.' + else: + # VQA_ex_prompt: OlympiadBench, VizWiz + prompt = line['question'] + if os.getenv('USE_COT') == '1': + prompt = build_qa_cot_prompt(line, prompt) + + message = [dict(type='text', value=prompt)] + image_num = len(tgt_path) + max_num = max(1, min(max_num, 64 // image_num)) + # TODO:support upscale_flag + message.extend([dict(type='image', value=s, max_dynamic_patch=max_num) for s in tgt_path]) + + if self.use_mpo_prompt: + message = build_mpo_prompt(message, line, dataset) + + # reorganize_prompt + prompt = reorganize_prompt(message, image_num, dataset=dataset) + prompt.replace('', '') + message[0] = dict(type='text', value=prompt) + return message + + def prepare_itlist(self, inputs): + assert np.all([isinstance(x, dict) for x in inputs]) + has_images = np.sum([x['type'] == 'image' for x in inputs]) + if has_images: + content_list = [] + for msg in inputs: + if msg['type'] == 'text': + content_list.append(dict(type='text', text=msg['value'])) + elif msg['type'] == 'image': + from PIL import Image + img = Image.open(msg['value']) + b64 = encode_image_to_base64(img) + extra_args = msg.copy() + extra_args.pop('type') + extra_args.pop('value') + img_struct = dict(url=f'data:image/jpeg;base64,{b64}', **extra_args) + content_list.append(dict(type='image_url', image_url=img_struct)) + else: + assert all([x['type'] == 'text' for x in inputs]) + text = '\n'.join([x['value'] for x in inputs]) + content_list = [dict(type='text', text=text)] + return content_list + + def prepare_inputs(self, inputs): + input_msgs = [] + if self.system_prompt is not None: + input_msgs.append(dict(role='system', content=self.system_prompt)) + assert isinstance(inputs, list) and isinstance(inputs[0], dict) + assert np.all(['type' in x for x in inputs]) or np.all(['role' in x for x in inputs]), inputs + if 'role' in inputs[0]: + assert inputs[-1]['role'] == 'user', inputs[-1] + for item in inputs: + input_msgs.append(dict(role=item['role'], content=self.prepare_itlist(item['content']))) + else: + input_msgs.append(dict(role='user', content=self.prepare_itlist(inputs))) + return input_msgs + + def generate_inner(self, inputs, **kwargs) -> str: + input_msgs = self.prepare_inputs(inputs) + + temperature = kwargs.pop('temperature', self.temperature) + self.logger.info(f'Generate temperature: {temperature}') + max_tokens = kwargs.pop('max_tokens', self.max_tokens) + + headers = {'Content-Type': 'application/json'} + payload = dict( + model=self.model, + messages=input_msgs, + max_tokens=max_tokens, + n=1, + top_k=1, + temperature=temperature, + stream=False, + **kwargs) + + response = requests.post( + self.api_base, + headers=headers, data=json.dumps(payload), timeout=self.timeout * 1.1) + ret_code = response.status_code + ret_code = 0 if (200 <= int(ret_code) < 300) else ret_code + answer = self.fail_msg + try: + resp_struct = json.loads(response.text) + answer = resp_struct['choices'][0]['message']['content'].strip() + + # for internvl2-8b-mpo-cot + if getattr(self, 'use_mpo_prompt', False): + from ..vlm.internvl.utils import mpo_post_processing + answer = mpo_post_processing(answer, kwargs.get('dataset')) + except: + pass + return ret_code, answer, 
response + + +class MUGUAPI(MUGUWrapper): + def generate(self, message, dataset=None): + return super(MUGUAPI, self).generate(message, dataset=dataset) \ No newline at end of file diff --git a/vlmeval/api/qwen_api.py b/vlmeval/api/qwen_api.py new file mode 100644 index 0000000000000000000000000000000000000000..6092ecffbc38363d6b3aee8807ffeb93743673f5 --- /dev/null +++ b/vlmeval/api/qwen_api.py @@ -0,0 +1,75 @@ +from http import HTTPStatus +import os +from vlmeval.api.base import BaseAPI +from vlmeval.smp import * + + +# Note: This is a pure language model API. +class QwenAPI(BaseAPI): + + is_api: bool = True + + def __init__(self, + model: str = 'qwen-max-1201', + retry: int = 5, + wait: int = 5, + verbose: bool = True, + seed: int = 2680, + temperature: float = 0.0, + system_prompt: str = None, + key: str = None, + max_tokens: int = 2048, + proxy: str = None, + **kwargs): + + assert model in ['qwen-turbo', 'qwen-plus', 'qwen-max', 'qwen-max-1201', 'qwen-max-longcontext'] + self.model = model + import dashscope + self.fail_msg = 'Failed to obtain answer via API. ' + self.max_tokens = max_tokens + self.temperature = temperature + self.seed = seed + if key is None: + key = os.environ.get('DASHSCOPE_API_KEY', None) + assert key is not None, ( + 'Please set the API Key (obtain it here: ' + 'https://help.aliyun.com/zh/dashscope/developer-reference/vl-plus-quick-start)' + ) + dashscope.api_key = key + if proxy is not None: + proxy_set(proxy) + super().__init__(wait=wait, retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs) + + @staticmethod + def build_msgs(msgs_raw, system_prompt=None): + msgs = cp.deepcopy(msgs_raw) + ret = [] + if system_prompt is not None: + ret.append(dict(role='system', content=system_prompt)) + for i, msg in enumerate(msgs): + role = 'user' if i % 2 == 0 else 'assistant' + ret.append(dict(role=role, content=msg)) + return ret + + def generate_inner(self, inputs, **kwargs) -> str: + from dashscope import MultiModalConversation + assert isinstance(inputs, str) or isinstance(inputs, list) + inputs = [inputs] if isinstance(inputs, str) else inputs + messages = self.build_msgs(msgs_raw=inputs, system_prompt=self.system_prompt) + + import dashscope + response = dashscope.Generation.call( + model=self.model, + messages=messages, + seed=self.seed, + temperature=self.temperature, + max_tokens=self.max_tokens, + result_format='message', # set the result to be "message" format. + ) + if response.status_code != HTTPStatus.OK: + return -1, 'Error: Bad Response Statuse Code. ', f'The response status code is {response.status_code}. ' + + try: + return 0, response['output']['choices'][0]['message']['content'].strip(), 'Succeeded! ' + except Exception as err: + return -1, f'Error: Failed to parse the response. 
{err}', response diff --git a/vlmeval/api/qwen_vl_api.py b/vlmeval/api/qwen_vl_api.py new file mode 100644 index 0000000000000000000000000000000000000000..3f4dc1834ff901d202164fbc17f7eb884141c97c --- /dev/null +++ b/vlmeval/api/qwen_vl_api.py @@ -0,0 +1,219 @@ +from __future__ import annotations + +import os +import warnings + +from vlmeval.smp import * +from vlmeval.api.base import BaseAPI +from vlmeval.vlm.qwen2_vl.prompt import Qwen2VLPromptMixin + + +def ensure_image_url(image: str) -> str: + prefixes = ['http://', 'https://', 'file://', 'data:image;'] + if any(image.startswith(prefix) for prefix in prefixes): + return image + if os.path.exists(image): + return 'file://' + image + raise ValueError(f'Invalid image: {image}') + + +class Qwen2VLAPI(Qwen2VLPromptMixin, BaseAPI): + is_api: bool = True + + def __init__( + self, + model: str = 'qwen-vl-max-0809', + key: str | None = None, + min_pixels: int | None = None, + max_pixels: int | None = None, + max_length=1024, + top_p=0.001, + top_k=1, + temperature=0.01, + repetition_penalty=1.0, + presence_penalty=0.0, + seed=3407, + use_custom_prompt: bool = True, + **kwargs, + ): + import dashscope + + self.model = model + self.min_pixels = min_pixels + self.max_pixels = max_pixels + self.generate_kwargs = dict( + max_length=max_length, + top_p=top_p, + top_k=top_k, + temperature=temperature, + repetition_penalty=repetition_penalty, + presence_penalty=presence_penalty, + seed=seed, + ) + + key = os.environ.get('DASHSCOPE_API_KEY', None) if key is None else key + assert key is not None, ( + 'Please set the API Key (obtain it here: ' + 'https://help.aliyun.com/zh/dashscope/developer-reference/vl-plus-quick-start)' + ) + dashscope.api_key = key + super().__init__(use_custom_prompt=use_custom_prompt, **kwargs) + + def _prepare_content(self, inputs: list[dict[str, str]], dataset: str | None = None) -> list[dict[str, str]]: + """ + inputs list[dict[str, str]], each dict has keys: ['type', 'value'] + """ + content = [] + for s in inputs: + if s['type'] == 'image': + item = {'type': 'image', 'image': ensure_image_url(s['value'])} + if dataset == 'OCRBench': + item['min_pixels'] = 10 * 10 * 28 * 28 + warnings.warn(f"OCRBench dataset uses custom min_pixels={item['min_pixels']}") + if self.max_pixels is not None: + item['max_pixels'] = self.max_pixels + else: + if self.min_pixels is not None: + item['min_pixels'] = self.min_pixels + if self.max_pixels is not None: + item['max_pixels'] = self.max_pixels + elif s['type'] == 'text': + item = {'type': 'text', 'text': s['value']} + else: + raise ValueError(f"Invalid message type: {s['type']}, {s}") + content.append(item) + return content + + def generate_inner(self, inputs, **kwargs) -> str: + import dashscope + + messages = [] + if self.system_prompt is not None: + messages.append({'role': 'system', 'content': self.system_prompt}) + messages.append( + {'role': 'user', 'content': self._prepare_content(inputs, dataset=kwargs.get('dataset', None))} + ) + if self.verbose: + print(f'\033[31m{messages}\033[0m') + + # generate + generation_kwargs = self.generate_kwargs.copy() + kwargs.pop('dataset', None) + generation_kwargs.update(kwargs) + try: + response = dashscope.MultiModalConversation.call( + model=self.model, + messages=messages, + **generation_kwargs, + ) + if self.verbose: + print(response) + answer = response.output.choices[0]['message']['content'][0]['text'] + return 0, answer, 'Succeeded! 
' + except Exception as err: + if self.verbose: + self.logger.error(f'{type(err)}: {err}') + self.logger.error(f'The input messages are {inputs}.') + return -1, '', '' + + +class QwenVLWrapper(BaseAPI): + + is_api: bool = True + + def __init__(self, + model: str = 'qwen-vl-plus', + retry: int = 5, + wait: int = 5, + key: str = None, + verbose: bool = True, + temperature: float = 0.0, + system_prompt: str = None, + max_tokens: int = 2048, + proxy: str = None, + **kwargs): + + assert model in ['qwen-vl-plus', 'qwen-vl-max'] + self.model = model + import dashscope + self.fail_msg = 'Failed to obtain answer via API. ' + self.max_tokens = max_tokens + self.temperature = temperature + if key is None: + key = os.environ.get('DASHSCOPE_API_KEY', None) + assert key is not None, ( + 'Please set the API Key (obtain it here: ' + 'https://help.aliyun.com/zh/dashscope/developer-reference/vl-plus-quick-start)' + ) + dashscope.api_key = key + if proxy is not None: + proxy_set(proxy) + super().__init__(wait=wait, retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs) + + # inputs can be a lvl-2 nested list: [content1, content2, content3, ...] + # content can be a string or a list of image & text + def prepare_itlist(self, inputs): + assert np.all([isinstance(x, dict) for x in inputs]) + has_images = np.sum([x['type'] == 'image' for x in inputs]) + if has_images: + content_list = [] + for msg in inputs: + if msg['type'] == 'text': + content_list.append(dict(text=msg['value'])) + elif msg['type'] == 'image': + content_list.append(dict(image='file://' + msg['value'])) + else: + assert all([x['type'] == 'text' for x in inputs]) + text = '\n'.join([x['value'] for x in inputs]) + content_list = [dict(text=text)] + return content_list + + def prepare_inputs(self, inputs): + input_msgs = [] + if self.system_prompt is not None: + input_msgs.append(dict(role='system', content=self.system_prompt)) + assert isinstance(inputs, list) and isinstance(inputs[0], dict) + assert np.all(['type' in x for x in inputs]) or np.all(['role' in x for x in inputs]), inputs + if 'role' in inputs[0]: + assert inputs[-1]['role'] == 'user', inputs[-1] + for item in inputs: + input_msgs.append(dict(role=item['role'], content=self.prepare_itlist(item['content']))) + else: + input_msgs.append(dict(role='user', content=self.prepare_itlist(inputs))) + return input_msgs + + def generate_inner(self, inputs, **kwargs) -> str: + from dashscope import MultiModalConversation + assert isinstance(inputs, str) or isinstance(inputs, list) + + if 'type' in inputs[0]: + pure_text = np.all([x['type'] == 'text' for x in inputs]) + else: + pure_text = True + for inp in inputs: + if not np.all([x['type'] == 'text' for x in inp['content']]): + pure_text = False + break + + assert not pure_text + messages = self.prepare_inputs(inputs) + gen_config = dict(max_output_tokens=self.max_tokens, temperature=self.temperature) + gen_config.update(kwargs) + try: + response = MultiModalConversation.call(model=self.model, messages=messages) + if self.verbose: + print(response) + answer = response.output.choices[0]['message']['content'][0]['text'] + return 0, answer, 'Succeeded! 
' + except Exception as err: + if self.verbose: + self.logger.error(f'{type(err)}: {err}') + self.logger.error(f'The input messages are {inputs}.') + + return -1, '', '' + + +class QwenVLAPI(QwenVLWrapper): + + def generate(self, message, dataset=None): + return super(QwenVLAPI, self).generate(message) diff --git a/vlmeval/api/siliconflow.py b/vlmeval/api/siliconflow.py new file mode 100644 index 0000000000000000000000000000000000000000..2f5b723f22b72b6f97d5f3634953e106391ec79f --- /dev/null +++ b/vlmeval/api/siliconflow.py @@ -0,0 +1,276 @@ +import math +from vlmeval.smp import * +from vlmeval.api.base import BaseAPI +from vlmeval.dataset import img_root_map + +API_BASE = "https://api.siliconflow.cn/v1/chat/completions" + + +def resize_image(image: Image.Image, max_height: int, max_width: int) -> Image.Image: + width, height = image.size + if min(width, height) < 50: + scale = 50 / min(width, height) + image = image.resize((int(width * scale), int(height * scale))) + current_pixels = width * height + + if current_pixels <= max_height * max_width: + return image + + scale = math.sqrt(max_height * max_width / current_pixels) + new_width = int(width * scale) + new_height = int(height * scale) + + return image.resize((new_width, new_height), Image.Resampling.LANCZOS) + + +def encode_image(path: str, max_height: int = 1024, max_width: int = 1024) -> str: + image = Image.open(path).convert("RGB") + image = resize_image(image, max_height, max_width) + width, height = image.size + if min(height, width) < 50: + scale = 50 / min(width, height) + image = image.resize((int(width * scale), int(height * scale))) + buffered = io.BytesIO() + image.save(buffered, format="PNG") + img_bytes = buffered.getvalue() + img_base64 = base64.b64encode(img_bytes).decode("utf-8") + return img_base64 + + +class SiliconFlowAPI(BaseAPI): + + is_api: bool = True + + def __init__( + self, + model: str = "deepseek-ai/DeepSeek-V2.5", + retry: int = 5, + wait: int = 5, + key: str = None, + api_base: str = API_BASE, + verbose: bool = True, + system_prompt: str = None, + timeout: int = 60, + reasoning: bool = False, # If set, will return results in the format of {'content': '...', 'reasoning': '...'} + **kwargs, + ): + + self.model = model + self.api_base = api_base + self.reasoning = reasoning + self.timeout = timeout + + default_kwargs = { + "stream": False, + "temperature": 0, + "n": 1, + "max_tokens": 1280, + } + for k, v in default_kwargs.items(): + if k not in kwargs: + kwargs[k] = default_kwargs[k] + if key is not None: + self.key = key + else: + self.key = os.environ.get("SiliconFlow_API_KEY", "") + headers = {"Authorization": "Bearer {}", "Content-Type": "application/json"} + headers["Authorization"] = headers["Authorization"].format(self.key) + self.headers = headers + super().__init__( + wait=wait, + retry=retry, + system_prompt=system_prompt, + verbose=verbose, + **kwargs, + ) + + @staticmethod + def build_msgs(msgs_raw): + messages = [] + message = {"role": "user", "content": []} + image_b64 = None + for msg in msgs_raw: + if msg["type"] == "image" and not image_b64: + image_b64 = encode_image(msg["value"]) + message["content"].append( + {"image_url": {"url": image_b64}, "type": "image_url"} + ) + elif msg["type"] == "text": + message["content"].append({"text": msg["value"], "type": "text"}) + + messages.append(message) + return messages + + def generate_inner(self, inputs, **kwargs) -> str: + default_kwargs = self.default_kwargs + default_kwargs.update(kwargs) + + payload = dict( + model=self.model, + 
messages=self.build_msgs(msgs_raw=inputs), + **default_kwargs, + ) + + response = requests.post( + self.api_base, headers=self.headers, data=json.dumps(payload), timeout=self.timeout * 1.1 + ) + ret_code = response.status_code + ret_code = 0 if (200 <= int(ret_code) < 300) else ret_code + + answer = self.fail_msg + try: + resp_struct = json.loads(response.text) + msg = resp_struct["choices"][0]["message"] + if self.reasoning and 'reasoning_content' in msg: + answer = {'content': msg['content'], 'reasoning': msg['reasoning_content']} + else: + answer = resp_struct["choices"][0]["message"]["content"].strip() + except: + pass + return ret_code, answer, response + + +class TeleMMAPI(SiliconFlowAPI): + + is_api: bool = True + + def __init__( + self, + model: str = "TeleAI/TeleMM", + key: str = None, + max_height: int = 1280, + max_width: int = 784, + **kwargs, + ): + super().__init__(model=model, key=key, **kwargs) + self.max_height = max_height + self.max_width = max_width + + def dump_image(self, line, dataset): + """Dump the image(s) of the input line to the corresponding dataset folder. + + Args: + line (line of pd.DataFrame): The raw input line. + dataset (str): The name of the dataset. + + Returns: + str | list[str]: The paths of the dumped images. + """ + ROOT = LMUDataRoot() + assert isinstance(dataset, str) + # img_root = osp.join(ROOT, 'images', img_root_map[dataset] if dataset in img_root_map else dataset) + img_root = osp.join(ROOT, "images", img_root_map(dataset)) + os.makedirs(img_root, exist_ok=True) + if "image" in line: + if isinstance(line["image"], list): + tgt_path = [] + assert "image_path" in line + for img, im_name in zip(line["image"], line["image_path"]): + path = osp.join(img_root, im_name) + if not read_ok(path): + decode_base64_to_image_file(img, path) + tgt_path.append(path) + else: + tgt_path = osp.join(img_root, f"{line['index']}.jpg") + if not read_ok(tgt_path): + decode_base64_to_image_file(line["image"], tgt_path) + tgt_path = [tgt_path] + else: + assert "image_path" in line + tgt_path = toliststr(line["image_path"]) + return tgt_path + + def _prepare_content( + self, inputs: list[dict[str, str]], dataset: str = None + ) -> list[dict[str, str]]: + """ + inputs list[dict[str, str]], each dict has keys: ['type', 'value'] + """ + content = [] + has_image = False + for s in inputs: + if s["type"] == "image": + if not has_image: + item = { + "type": "image_url", + "image_url": { + "url": encode_image( + s["value"], + max_height=self.max_height, + max_width=self.max_width, + ) + }, + } + has_image = True + else: + continue + elif s["type"] == "text": + prompt = s["value"] + if len(prompt) == 0: + continue + if dataset == "HallusionBench": + prompt += " Please answer yes or no directly, without any unnecessary explanation." + elif dataset == "OCRBench": + prompt = ( + prompt + "\nExtract the text from the image intactly and " + + "answer the question concisely and clearly if possible." + ) + + elif ( + dataset == "AI2D_TEST" + or dataset == "MMStar" + or dataset == "MMBench_TEST_EN_V11" + or dataset == "MMVet" + ): + prompt = prompt.replace( + "Please select the correct answer from the options above. \n", + "Please select the correct option from the above choices based on the " + + "input image and question. The final output should only be one option, such as 'A'", + ) + elif dataset == "MMBench_TEST_CN_V11": + prompt = prompt.replace( + "Please select the correct answer from the options above. 
\n", + "请根据输入图像和问题从上述选项中选择正确选项,最终的输出只有一个选项,例如'A'", + ) + item = {"type": "text", "text": prompt} + else: + raise ValueError(f"Invalid message type: {s['type']}, {s}") + content.append(item) + + return content + + def generate_inner(self, inputs, **kwargs) -> str: + default_kwargs = self.default_kwargs + default_kwargs.update(kwargs) + + messages = [] + messages.append( + { + "role": "user", + "content": self._prepare_content( + inputs, dataset=kwargs.get("dataset", None) + ), + } + ) + + payload = dict(model=self.model, messages=messages, **default_kwargs) + + response = requests.post( + self.api_base, headers=self.headers, data=json.dumps(payload) + ) + ret_code = response.status_code + ret_code = 0 if (200 <= int(ret_code) < 300) else ret_code + + answer = self.fail_msg + try: + resp_struct = json.loads(response.text) + answer = resp_struct["choices"][0]["message"]["content"].strip() + return ret_code, answer, response + except Exception as err: + import traceback + + traceback.print_exc() + if self.verbose: + self.logger.error(f"{type(err)}: {err}") + self.logger.error(f"The input messages are {inputs}.") + return -1, "", "" diff --git a/vlmeval/api/taichu.py b/vlmeval/api/taichu.py new file mode 100644 index 0000000000000000000000000000000000000000..dd313330d4c977c70e7840dcffe7612bcf3149c6 --- /dev/null +++ b/vlmeval/api/taichu.py @@ -0,0 +1,363 @@ +from vlmeval.smp import * +from vlmeval.api.base import BaseAPI +import os +import re +import json + +from PIL import Image +import base64 +from io import BytesIO +import copy + + +class ChatResponse(dict): + def __getattr__(self, name): + value = self.get(name) + if isinstance(value, dict): + return ChatResponse(value) # 如果值是字典,递归包装成 DotDict + elif isinstance(value, list): + return [ChatResponse(v) if isinstance(v, dict) else v for v in value] # 如果值是列表,处理其中的字典 + return value + + def __setattr__(self, name, value): + self[name] = value + + def __delattr__(self, name): + del self[name] + + +from ..dataset import DATASET_TYPE + + +class TaichuVLWrapper(BaseAPI): + is_api: bool = True + + def __init__(self, + model: str = 'Taichu-VL-2B', + retry: int = 5, + wait: int = 5, + verbose: bool = True, + temperature: float = 0.0, + system_prompt: str = None, + max_tokens: int = 4096, + key: str = None, + url: str = None, + **kwargs): + + self.model = model + self.kwargs = kwargs + self.max_tokens = max_tokens + + self.system_prompt = '[sys]You are a helpful assistant.[/sys]' + self.hint_prompt = '||' + self.mcq_prompt = '||' + + self.datasets_use_system = ['MMVet'] + self.datasets_use_multichoice = [ + 'MathVista', 'MathVision'] + + openai_key = os.environ.get('OPENAI_API_KEY', None) + use_openai = os.environ.get('USE_OPENAI_EVAL', True) + self.use_openai_evaluate = (isinstance(openai_key, str) and openai_key.startswith('sk-') and use_openai) + + self.api_key = os.environ.get('TAICHU_API_KEY', key) + self.api_url = url + + assert self.api_key is not None, 'Please set the API Key' + + super().__init__(wait=wait, retry=retry, system_prompt=self.system_prompt, verbose=verbose, **kwargs) + + def set_dump_image(self, dump_image_func): + self.dump_image_func = dump_image_func + + def dump_image(self, line, dataset): + return self.dump_image_func(line) + + def use_custom_prompt(self, dataset): + if listinstr(['MCQ', 'VQA'], DATASET_TYPE(dataset)): + return True + elif dataset is not None and listinstr(['HallusionBench'], dataset): + return True + return False + + def clear_prompt(self, prompt): + prompt = re.sub(r"Hint:.*?Question:", "", prompt, 
flags=re.S).strip() + prompt = re.sub(r"\nChoices:\n.*", "", prompt, flags=re.S).strip() + return prompt + + def encode_image(self, pil_image): + buffer = BytesIO() + pil_image.save(buffer, format='PNG') + base64_str = base64.b64encode(buffer.getvalue()).decode("utf-8") + return base64_str + + def build_prompt(self, line, dataset=None): + if isinstance(line, int): + line = self.data.iloc[line] + + tgt_path = self.dump_image(line, dataset) + question = line['question'] + hint = None + if listinstr(self.datasets_use_system, dataset): + system_prompt = self.system_prompt + else: + system_prompt = '' + mcq = False + if DATASET_TYPE(dataset) == 'MCQ' or listinstr(self.datasets_use_multichoice, dataset): + options = { + cand: line[cand] + for cand in string.ascii_uppercase + if cand in line and not pd.isna(line[cand]) + } + if listinstr(self.datasets_use_multichoice, dataset): + options = {} + if not pd.isna(line['choices']): + for i, c in enumerate(eval(line['choices'])): + options[string.ascii_uppercase[i]] = c + question = self.clear_prompt(question) + + # support chinese + if listinstr(['_CN', '_cn'], dataset): + options_prompt = '\n选项:\n' + else: + options_prompt = '\nOPTIONS:\n' + options_prompt += '\n'.join(f"{key}:{value}" for key, value in options.items()) + hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None + mcq = True if len(options) else False + if len(options): + prompt = question + options_prompt + else: + prompt = question + else: + prompt = question + + msgs = [] + if system_prompt: + msgs.append(dict(type='text', value=system_prompt)) + + if isinstance(tgt_path, list): + msgs.extend([dict(type='image', value=p) for p in tgt_path]) + else: + msgs.append(dict(type='image', value=tgt_path)) + + if hint: + prompt = 'Hint: ' + hint + '\n' + prompt + msgs.append(dict(type='text', value=prompt)) + + if mcq: + msgs.append(dict(type='text', value=self.mcq_prompt)) + return msgs + + def prompt_to_request_messages(self, inputs): + + messages = [ + {'role': 'user', 'content': []} + ] + is_mcq = False + for x in inputs: + if x['type'] == 'text': + if x['value'] == self.system_prompt: + messages = [{'role': 'system', 'content': [{"type": "text", "text": x['value']}]}] + messages + elif self.mcq_prompt == x['value']: + is_mcq = True + else: + messages[-1]['content'].append( + {"type": "text", "text": x['value']}, + ) + if x['type'] == 'image': + _url = self.encode_image(Image.open(x['value'])) + messages[-1]['content'].append( + {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{_url}"}}, + ) + else: + continue + + return messages, is_mcq + + def generate_inner(self, inputs, **kwargs) -> str: + messages, is_mcq = self.prompt_to_request_messages(inputs) + + data = { + "model": self.model, + "messages": messages, + "max_tokens": self.max_tokens, + "temperature": 0, + "top_p": 0.8, + "stream": False, + "extra_body": { + "repetition_penalty": 1 + } + } + + headers = { + 'Authorization': self.api_key, + 'Content-Type': 'application/json' + } + + try: + chat_response = requests.post(self.api_url, json=data, headers=headers) + response = ChatResponse(json.loads(chat_response.content)) + result = response.choices[0].message.content + # Extract index to exact matching when ChatGPT is unavailable. + if self.use_openai_evaluate is False and is_mcq is True: + try: + result = result[0] + except: + result = 'A' + return 0, result, 'Succeeded! 
' + except Exception as err: + if self.verbose: + self.logger.error(f'{type(err)}: {err}') + self.logger.error(f'The input messages are {inputs}.') + return -1, '', '' + + +class TaichuVLAPI(TaichuVLWrapper): + + def generate(self, message, dataset=None): + return super(TaichuVLAPI, self).generate(message, dataset=dataset) + + +class TaichuVLRWrapper(BaseAPI): + is_api: bool = True + + def __init__(self, + model: str = 'taichu_vlr_3b', + retry: int = 5, + wait: int = 5, + verbose: bool = True, + temperature: float = 0.0, + system_prompt: str = None, + max_tokens: int = 4096, + use_reasoning_prompt: bool = True, + post_process: bool = True, + key: str = None, + url: str = None, + **kwargs): + + self.model = model + self.kwargs = kwargs + self.max_tokens = max_tokens + self.system_prompt = system_prompt + self.use_reasoning_prompt = use_reasoning_prompt + self.post_process = post_process + self.verbose = verbose + + openai_key = os.environ.get('OPENAI_API_KEY', None) + use_openai = os.environ.get('USE_OPENAI_EVAL', True) + self.use_openai_evaluate = (isinstance(openai_key, str) and openai_key.startswith('sk-') and use_openai) + + self.api_key = os.environ.get('TAICHU_API_KEY', key) + self.api_url = url + + assert self.api_key is not None, 'Please set the API Key' + + super().__init__(wait=wait, retry=retry, system_prompt=self.system_prompt, verbose=verbose, **kwargs) + + def use_custom_prompt(self, dataset): + return False + + def encode_image(self, pil_image): + buffer = BytesIO() + pil_image.save(buffer, format='PNG') + base64_str = base64.b64encode(buffer.getvalue()).decode("utf-8") + return base64_str + + def post_process_func(self, response): + resp = response.split('\\boxed{')[-1] + lt = len(resp) + counter, end = 1, None + for i in range(lt): + if resp[i] == '{': + counter += 1 + elif resp[i] == '}': + counter -= 1 + if counter == 0: + end = i + break + elif i == lt - 1: + end = lt + break + if end is not None: + response = resp[:end] + return response + + def prompt_to_request_messages(self, inputs): + + messages = [ + {'role': 'user', 'content': []} + ] + for x in inputs: + if x['type'] == 'text': + messages[-1]['content'].append( + {"type": "text", "text": x['value']}, + ) + if x['type'] == 'image': + _url = self.encode_image(Image.open(x['value'])) + messages[-1]['content'].append( + {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{_url}"}}, + ) + else: + continue + + PROMPT = ( + "First thinks about the reasoning process in the mind and then provides the user with the answer. " + "Put your final answer within \\boxed{}. 
" + "The response of reasoning and answer are formatted in reasoning \\boxed{answer here} .\n" + ) + + if self.use_reasoning_prompt: + for content in messages[0]['content']: + if content['type'] == 'text': + content['text'] = PROMPT + content['text'] + break + + return messages + + def generate_inner(self, inputs, **kwargs) -> str: + messages = self.prompt_to_request_messages(inputs) + if self.verbose: + verbose_messages = copy.deepcopy(messages) + for mess in verbose_messages: + if mess['role'] == 'user': + for content in mess['content']: + if content['type'] == 'image_url': + content['image_url']['url'] = '' + print(f'\033[31m{verbose_messages}\033[0m') + + data = { + "model": self.model, + "messages": messages, + "max_tokens": self.max_tokens, + "temperature": 0, + "top_p": 0.8, + "stream": False, + "repetition_penalty": 1.0 + } + + headers = { + 'Authorization': f"Bearer {self.api_key}", + 'Content-Type': 'application/json' + } + + try: + chat_response = requests.post(self.api_url, json=data, headers=headers) + response = ChatResponse(json.loads(chat_response.content)) + result = response.choices[0].message.content + if self.post_process: + result = self.post_process_func(result) + if self.verbose: + print(f'\033[32m{result}\033[0m') + + return 0, result, 'Succeeded! ' + except Exception as err: + if self.verbose: + self.logger.error(f'{type(err)}: {err}') + self.logger.error(f'The input messages are {inputs}.') + return -1, '', '' + + +class TaichuVLRAPI(TaichuVLRWrapper): + + def generate(self, message, dataset=None): + return super(TaichuVLRAPI, self).generate(message, dataset=dataset) \ No newline at end of file diff --git a/vlmeval/api/taiyi.py b/vlmeval/api/taiyi.py new file mode 100644 index 0000000000000000000000000000000000000000..3e069c43c05150b27702448e28520fc0b00b7af4 --- /dev/null +++ b/vlmeval/api/taiyi.py @@ -0,0 +1,192 @@ +from vlmeval.smp import * +from vlmeval.api.base import BaseAPI +from vlmeval.dataset import DATASET_TYPE, img_root_map + + +class TaiyiWrapper(BaseAPI): + + is_api: bool = True + + def __init__(self, + model: str = 'taiyi', + retry: int = 5, + wait: int = 5, + key: str = None, + verbose: bool = False, + system_prompt: str = None, + temperature: float = 0, + timeout: int = 60, + url: str = "https://taiyi.megvii.com/v1/chat/completions", + max_tokens: int = 1024, + **kwargs): + + self.model = model + self.fail_msg = 'Failed to obtain answer via API. 
' + self.max_tokens = max_tokens + self.temperature = temperature + + if key is None: + key = os.environ.get('TAIYI_API_KEY', None) + assert key is not None, ('Please set the API Key ') + self.key = key + + self.timeout = timeout + super().__init__(wait=wait, retry=retry, system_prompt=system_prompt, verbose=verbose, **kwargs) + assert url is not None, ('Please set the url ') + self.url = url + self.logger.info(f'Using url: {self.url}; API Key: {self.key}') + + def use_custom_prompt(self, dataset): + if DATASET_TYPE(dataset) == 'Y/N' or DATASET_TYPE(dataset) == 'MCQ' or DATASET_TYPE(dataset) == 'VQA': + return True + return False + + def prepare_inputs(self, inputs): + input_msgs = [] + if self.system_prompt is not None: + input_msgs.append(dict(role='system', content=self.system_prompt)) + has_images = np.sum([x['type'] == 'image' for x in inputs]) + if has_images: + content_list = [] + for msg in inputs: + if msg['type'] == 'text': + content_list.append(dict(type='text', text=msg['value'])) + elif msg['type'] == 'image': + imgbytes = open(msg['value'],'rb').read() + b64 = base64.b64encode(imgbytes).decode('ascii') + img_struct = dict(url=f'data:image/jpeg;base64,{b64}') + content_list.append(dict(type='image_url', image_url=img_struct)) + input_msgs.append(dict(role='user', content=content_list)) + else: + assert all([x['type'] == 'text' for x in inputs]) + text = '\n'.join([x['value'] for x in inputs]) + input_msgs.append(dict(role='user', content=text)) + return input_msgs + + def set_dump_image(self, dump_image_func): + self.dump_image_func = dump_image_func + + def dump_image(self, line, dataset): + return self.dump_image_func(line) + + def image_first(self, msgs): + nr_img = 0 + for s in msgs: + if s['type'] == 'image': + nr_img += 1 + + if nr_img == 1: + new_msgs = [] + img_msg = None + for s in msgs: + if s['type'] == 'text': + new_msgs.append(s) + else: + img_msg = s + new_msgs.insert(0, img_msg) + else: + new_msgs = msgs + + return new_msgs + + def build_multi_choice_prompt(self, line, dataset=None): + question = line['question'] + hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None + if hint is not None: + question = hint + '\n' + question + + options = { + cand: line[cand] + for cand in string.ascii_uppercase + if cand in line and not pd.isna(line[cand]) + } + for key, item in options.items(): + question += f'\n{key}. {item}' + prompt = question + + if len(options): + prompt += '\n请直接回答选项字母。' if cn_string( + prompt) else "\nAnswer with the option's letter from the given choices directly." + else: + prompt += '\n请直接回答问题。' if cn_string(prompt) else '\nAnswer the question directly.' + + return prompt + + def build_yorn_prompt(self, line, dataset=None): + if listinstr(['HallusionBench'], dataset): + pre_prompt = 'Read the following question carefully, think and solve it step by step.\n\n' + else: + pre_prompt = '' + + prompt = pre_prompt + line['question'] + ' Please answer yes or no as the final answer.' + + return prompt + + def build_vqa_prompt(self, line, dataset=None): + if listinstr(['OCRBench'], dataset): + pre_prompt = 'Carefully identify the text in the image and answer the question.\n\n' + else: + pre_prompt = '' + + if listinstr(['MMVet'], dataset): + post_prompt = '\nAnswer this question in detail.' 
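+        # Any other VQA dataset gets no extra suffix beyond the dataset-specific pre_prompt.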
+ else: + post_prompt = '' + + prompt = pre_prompt + line['question'] + post_prompt + + return prompt + + def build_prompt(self, line, dataset=None): + assert self.use_custom_prompt(dataset) + assert dataset is None or isinstance(dataset, str) + tgt_path = self.dump_image(line, dataset) + + if DATASET_TYPE(dataset) == 'MCQ': + prompt = self.build_multi_choice_prompt(line, dataset) + elif DATASET_TYPE(dataset) == 'Y/N': + prompt = self.build_yorn_prompt(line, dataset) + elif DATASET_TYPE(dataset) == 'VQA': + prompt = self.build_vqa_prompt(line, dataset) + else: + raise RuntimeError(f'Invalid dataset type: {DATASET_TYPE(dataset)}') + message = [] + message.extend([dict(type='image', value=s) for s in tgt_path]) + message.extend([dict(type='text', value=prompt)]) + + # interleave dataset + if dataset.startswith('MMMU_'): + from .. import MMMUDataset + message = MMMUDataset.split_MMMU(message) + message = self.image_first(message) + + return message + + def generate_inner(self, inputs, **kwargs) -> str: + + input_msgs = self.prepare_inputs(inputs) + temperature = kwargs.pop('temperature', self.temperature) + + headers = {'Authorization': f'Bearer {self.key}'} + payload = dict( + model=self.model, + messages=input_msgs, + n=1, + temperature=temperature, + **kwargs) + response = requests.post(self.url, headers=headers, data=json.dumps(payload), timeout=self.timeout * 1.1) + ret_code = response.status_code + ret_code = 0 if (200 <= int(ret_code) < 300) else ret_code + answer = self.fail_msg + try: + resp_struct = json.loads(response.text) + answer = resp_struct['choices'][0]['message']['content'].strip() + except: + pass + return ret_code, answer, response + + +class TaiyiAPI(TaiyiWrapper): + + def generate(self, message, dataset=None): + return super(TaiyiAPI, self).generate(message) diff --git a/vlmeval/config.py b/vlmeval/config.py new file mode 100644 index 0000000000000000000000000000000000000000..9a1889b77237a51b3a028faa6585d684d834c63f --- /dev/null +++ b/vlmeval/config.py @@ -0,0 +1,1281 @@ +from vlmeval.vlm import * +from vlmeval.api import * +from functools import partial + +PandaGPT_ROOT = None +MiniGPT4_ROOT = None +TransCore_ROOT = None +Yi_ROOT = None +OmniLMM_ROOT = None +Mini_Gemini_ROOT = None +VXVERSE_ROOT = None +VideoChat2_ROOT = None +VideoChatGPT_ROOT = None +PLLaVA_ROOT = None +RBDash_ROOT = None +VITA_ROOT = None +LLAVA_V1_7B_MODEL_PTH = "Please set your local path to LLaVA-7B-v1.1 here, the model weight is obtained by merging LLaVA delta weight based on vicuna-7b-v1.1 in https://github.com/haotian-liu/LLaVA/blob/main/docs/MODEL_ZOO.md with vicuna-7b-v1.1. 
" + +video_models = { + "Video-LLaVA-7B": partial(VideoLLaVA, model_path="LanguageBind/Video-LLaVA-7B"), + "Video-LLaVA-7B-HF": partial( + VideoLLaVA_HF, model_path="LanguageBind/Video-LLaVA-7B-hf" + ), + "VideoChat2-HD": partial( + VideoChat2_HD, + model_path="OpenGVLab/VideoChat2_HD_stage4_Mistral_7B", + root=VideoChat2_ROOT, + config_file="./vlmeval/vlm/video_llm/configs/videochat2_hd.json", + ), + "Chat-UniVi-7B": partial(Chatunivi, model_path="Chat-UniVi/Chat-UniVi"), + "Chat-UniVi-7B-v1.5": partial( + Chatunivi, model_path="Chat-UniVi/Chat-UniVi-7B-v1.5" + ), + "LLaMA-VID-7B": partial( + LLaMAVID, model_path="YanweiLi/llama-vid-7b-full-224-video-fps-1" + ), + "Video-ChatGPT": partial( + VideoChatGPT, model_path="MBZUAI/Video-ChatGPT-7B", dir_root=VideoChatGPT_ROOT + ), + "PLLaVA-7B": partial(PLLaVA, model_path="ermu2001/pllava-7b", dir_root=PLLaVA_ROOT), + "PLLaVA-13B": partial( + PLLaVA, model_path="ermu2001/pllava-13b", dir_root=PLLaVA_ROOT + ), + "PLLaVA-34B": partial( + PLLaVA, model_path="ermu2001/pllava-34b", dir_root=PLLaVA_ROOT + ), +} + +ungrouped = { + "AKI": partial(AKI, name="AKI", ckpt_pth="Sony/AKI-4B-phi-3.5-mini"), + "TransCore_M": partial(TransCoreM, root=TransCore_ROOT), + "PandaGPT_13B": partial(PandaGPT, name="PandaGPT_13B", root=PandaGPT_ROOT), + "flamingov2": partial( + OpenFlamingo, + name="v2", + mpt_pth="anas-awadalla/mpt-7b", + ckpt_pth="openflamingo/OpenFlamingo-9B-vitl-mpt7b", + ), + "VisualGLM_6b": partial(VisualGLM, model_path="THUDM/visualglm-6b"), + "mPLUG-Owl2": partial(mPLUG_Owl2, model_path="MAGAer13/mplug-owl2-llama2-7b"), + "mPLUG-Owl3": partial(mPLUG_Owl3, model_path="mPLUG/mPLUG-Owl3-7B-240728"), + "OmniLMM_12B": partial( + OmniLMM12B, model_path="openbmb/OmniLMM-12B", root=OmniLMM_ROOT + ), + "MGM_7B": partial( + Mini_Gemini, model_path="YanweiLi/MGM-7B-HD", root=Mini_Gemini_ROOT + ), + "Bunny-llama3-8B": partial(BunnyLLama3, model_path="BAAI/Bunny-v1_1-Llama-3-8B-V"), + "VXVERSE": partial(VXVERSE, model_name="XVERSE-V-13B", root=VXVERSE_ROOT), + "360VL-70B": partial(QH_360VL, model_path="qihoo360/360VL-70B"), + "Llama-3-MixSenseV1_1": partial( + LLama3Mixsense, model_path="Zero-Vision/Llama-3-MixSenseV1_1" + ), + "Parrot": partial(Parrot, model_path="AIDC-AI/Parrot-7B"), + "OmChat": partial(OmChat, model_path="omlab/omchat-v2.0-13B-single-beta_hf"), + "RBDash_72b": partial( + RBDash, model_path="RBDash-Team/RBDash-v1.5", root=RBDash_ROOT + ), + "Pixtral-12B": partial(Pixtral, model_path="mistralai/Pixtral-12B-2409"), + "Falcon2-VLM-11B": partial(Falcon2VLM, model_path="tiiuae/falcon-11B-vlm"), +} + +o1_key = 'XXX' # noqa: E501 +o1_apis = { + 'o1': partial( + GPT4V, + model="o1-2024-12-17", + key=o1_key, + api_base='OFFICIAL', + temperature=0, + img_detail='high', + retry=10, + verbose=False, + ), +} + +api_models = { + # GPT + "GPT4V": partial( + GPT4V, + model="gpt-4-1106-vision-preview", + temperature=0, + img_size=512, + img_detail="low", + retry=10, + verbose=False, + ), + "GPT4V_HIGH": partial( + GPT4V, + model="gpt-4-1106-vision-preview", + temperature=0, + img_size=-1, + img_detail="high", + retry=10, + verbose=False, + ), + "GPT4V_20240409": partial( + GPT4V, + model="gpt-4-turbo-2024-04-09", + temperature=0, + img_size=512, + img_detail="low", + retry=10, + verbose=False, + ), + "GPT4V_20240409_HIGH": partial( + GPT4V, + model="gpt-4-turbo-2024-04-09", + temperature=0, + img_size=-1, + img_detail="high", + retry=10, + verbose=False, + ), + "GPT4o": partial( + GPT4V, + model="gpt-4o-2024-05-13", + temperature=0, + img_size=512, + 
img_detail="low", + retry=10, + verbose=False, + ), + "GPT4o_HIGH": partial( + GPT4V, + model="gpt-4o-2024-05-13", + temperature=0, + img_size=-1, + img_detail="high", + retry=10, + verbose=False, + ), + "GPT4o_20240806": partial( + GPT4V, + model="gpt-4o-2024-08-06", + temperature=0, + img_size=-1, + img_detail="high", + retry=10, + verbose=False, + ), + "GPT4o_20241120": partial( + GPT4V, + model="gpt-4o-2024-11-20", + temperature=0, + img_size=-1, + img_detail="high", + retry=10, + verbose=False, + ), + "GPT4o_MINI": partial( + GPT4V, + model="gpt-4o-mini-2024-07-18", + temperature=0, + img_size=-1, + img_detail="high", + retry=10, + verbose=False, + ), + "GPT4.5": partial( + GPT4V, + model='gpt-4.5-preview-2025-02-27', + temperature=0, + timeout=600, + img_size=-1, + img_detail='high', + retry=10, + verbose=False, + ), + # Gemini + "GeminiPro1-0": partial( + GeminiProVision, model="gemini-1.0-pro", temperature=0, retry=10 + ), # now GeminiPro1-0 is only supported by vertex backend + "GeminiPro1-5": partial( + GeminiProVision, model="gemini-1.5-pro", temperature=0, retry=10 + ), + "GeminiFlash1-5": partial( + GeminiProVision, model="gemini-1.5-flash", temperature=0, retry=10 + ), + "GeminiFlash2-0": partial( + GeminiProVision, model="gemini-2.0-flash", temperature=0, retry=10 + ), + "GeminiPro2-0": partial( + GeminiProVision, model="gemini-2.0-pro-exp", temperature=0, retry=10 + ), + "GeminiPro1-5-002": partial( + GPT4V, model="gemini-1.5-pro-002", temperature=0, retry=10 + ), # Internal Use Only + "GeminiFlash1-5-002": partial( + GPT4V, model="gemini-1.5-flash-002", temperature=0, retry=10 + ), # Internal Use Only + # Qwen-VL + "QwenVLPlus": partial(QwenVLAPI, model="qwen-vl-plus", temperature=0, retry=10), + "QwenVLMax": partial(QwenVLAPI, model="qwen-vl-max", temperature=0, retry=10), + # Reka + "RekaEdge": partial(Reka, model="reka-edge-20240208"), + "RekaFlash": partial(Reka, model="reka-flash-20240226"), + "RekaCore": partial(Reka, model="reka-core-20240415"), + # Step1V + "Step1V": partial( + GPT4V, + model="step-1v-32k", + api_base="https://api.stepfun.com/v1/chat/completions", + temperature=0, + retry=10, + img_size=-1, + img_detail="high", + ), + "Step1.5V-mini": partial( + GPT4V, + model="step-1.5v-mini", + api_base="https://api.stepfun.com/v1/chat/completions", + temperature=0, + retry=10, + img_size=-1, + img_detail="high", + ), + "Step1o": partial( + GPT4V, + model="step-1o-vision-32k", + api_base="https://api.stepfun.com/v1/chat/completions", + temperature=0, + retry=10, + img_size=-1, + img_detail="high", + ), + # Yi-Vision + "Yi-Vision": partial( + GPT4V, + model="yi-vision", + api_base="https://api.lingyiwanwu.com/v1/chat/completions", + temperature=0, + retry=10, + ), + # Claude + "Claude3V_Opus": partial( + Claude3V, model="claude-3-opus-20240229", temperature=0, retry=10, verbose=False + ), + "Claude3V_Sonnet": partial( + Claude3V, + model="claude-3-sonnet-20240229", + temperature=0, + retry=10, + verbose=False, + ), + "Claude3V_Haiku": partial( + Claude3V, + model="claude-3-haiku-20240307", + temperature=0, + retry=10, + verbose=False, + ), + "Claude3-5V_Sonnet": partial( + Claude3V, + model="claude-3-5-sonnet-20240620", + temperature=0, + retry=10, + verbose=False, + ), + "Claude3-5V_Sonnet_20241022": partial( + Claude3V, + model="claude-3-5-sonnet-20241022", + temperature=0, + retry=10, + verbose=False, + ), + "Claude3-7V_Sonnet": partial( + Claude3V, + model="claude-3-7-sonnet-20250219", + temperature=0, + retry=10, + verbose=False, + ), + # GLM4V + "GLM4V": 
partial(GLMVisionAPI, model="glm4v-biz-eval", temperature=0, retry=10), + "GLM4V_PLUS": partial(GLMVisionAPI, model="glm-4v-plus", temperature=0, retry=10), + "GLM4V_PLUS_20250111": partial( + GLMVisionAPI, model="glm-4v-plus-0111", temperature=0, retry=10 + ), + # MiniMax abab + "abab6.5s": partial( + GPT4V, + model="abab6.5s-chat", + api_base="https://api.minimax.chat/v1/chat/completions", + temperature=0, + retry=10, + ), + "abab7-preview": partial( + GPT4V, + model="abab7-chat-preview", + api_base="https://api.minimax.chat/v1/chat/completions", + temperature=0, + retry=10, + ), + # CongRong + "CloudWalk": partial(CWWrapper, model="cw-congrong-v1.5", temperature=0, retry=10), + # SenseChat-V + "SenseChat-Vision": partial( + SenseChatVisionAPI, model="SenseChat-Vision", temperature=0, retry=10 + ), + "HunYuan-Vision": partial( + HunyuanVision, model="hunyuan-vision", temperature=0, retry=10 + ), + "HunYuan-Standard-Vision": partial( + HunyuanVision, model="hunyuan-standard-vision", temperature=0, retry=10 + ), + "BailingMM-Lite-1203": partial( + bailingMMAPI, model="BailingMM-Lite-1203", temperature=0, retry=10 + ), + "BailingMM-Pro-0120": partial( + bailingMMAPI, model="BailingMM-Pro-0120", temperature=0, retry=10 + ), + # BlueLM-V + "BlueLM_V": partial(BlueLM_V_API, model="BlueLM-VL-v3.0", temperature=0, retry=10), + # JiuTian-VL + "JTVL": partial(JTVLChatAPI, model="jt-vl-chat", temperature=0, retry=10), + "Taiyi": partial(TaiyiAPI, model="taiyi", temperature=0, retry=10), + # TeleMM + "TeleMM": partial(TeleMMAPI, model="TeleAI/TeleMM", temperature=0, retry=10), + # lmdeploy api + "lmdeploy": partial( + LMDeployAPI, + api_base="http://0.0.0.0:23333/v1/chat/completions", + temperature=0, + retry=10, + ), + "lmdeploy_internvl_78B_MPO": partial( + LMDeployAPI, + api_base="http://0.0.0.0:23333/v1/chat/completions", + temperature=0, + retry=10, + timeout=100, + ), + "lmdeploy_qvq_72B_preview": partial( + LMDeployAPI, + api_base="http://0.0.0.0:23333/v1/chat/completions", + temperature=0, + retry=10, + timeout=300, + ), + # Taichu-VL + # "Taichu-VL-2B": partial( + # TaichuVLAPI, + # model="Taichu-VL-2B", + # url="https://platform.wair.ac.cn/api/v1/infer/10381/v1/chat/completions", + # ), + 'Taichu-VLR-3B': partial( + TaichuVLRAPI, + model='taichu_vlr_3b', + url="https://platform.wair.ac.cn/maas/v1/chat/completions" + ), + 'Taichu-VLR-7B': partial( + TaichuVLRAPI, + model='taichu_vlr_7b', + url="https://platform.wair.ac.cn/maas/v1/chat/completions" + ), + # doubao_vl + "DoubaoVL": partial( + DoubaoVL, model="Doubao-1.5-vision-pro", temperature=0, retry=10, verbose=False + ), + # Shopee MUG-U + 'MUG-U-7B': partial( + MUGUAPI, + model='MUG-U', + temperature=0, + retry=10, + verbose=False, + timeout=300), + # grok + "grok-vision-beta": partial( + GPT4V, + model="grok-vision-beta", + api_base="https://api.x.ai/v1/chat/completions", + temperature=0, + retry=10, + ), + "grok-2-vision-1212": partial( + GPT4V, + model="grok-2-vision", + api_base="https://api.x.ai/v1/chat/completions", + temperature=0, + retry=10, + ), + # kimi + "moonshot-v1-8k": partial( + GPT4V, + model="moonshot-v1-8k-vision-preview", + api_base="https://api.moonshot.cn/v1/chat/completions", + temperature=0, + retry=10, + ), + "moonshot-v1-32k": partial( + GPT4V, + model="moonshot-v1-32k-vision-preview", + api_base="https://api.moonshot.cn/v1/chat/completions", + temperature=0, + retry=10, + ), + "moonshot-v1-128k": partial( + GPT4V, + model="moonshot-v1-128k-vision-preview", + 
api_base="https://api.moonshot.cn/v1/chat/completions", + temperature=0, + retry=10, + ), +} + +emu_series = { + "emu2_chat": partial(Emu, model_path="BAAI/Emu2-Chat"), + "emu3_chat": partial(Emu3_chat, model_path="BAAI/Emu3-Chat"), + "emu3_gen": partial(Emu3_gen, model_path="BAAI/Emu3-Gen"), +} +mmalaya_series = { + "MMAlaya": partial(MMAlaya, model_path="DataCanvas/MMAlaya"), + "MMAlaya2": partial(MMAlaya2, model_path="DataCanvas/MMAlaya2"), +} + +minicpm_series = { + "MiniCPM-V": partial(MiniCPM_V, model_path="openbmb/MiniCPM-V"), + "MiniCPM-V-2": partial(MiniCPM_V, model_path="openbmb/MiniCPM-V-2"), + "MiniCPM-Llama3-V-2_5": partial( + MiniCPM_Llama3_V, model_path="openbmb/MiniCPM-Llama3-V-2_5" + ), + "MiniCPM-V-2_6": partial(MiniCPM_V_2_6, model_path="openbmb/MiniCPM-V-2_6"), + "MiniCPM-o-2_6": partial(MiniCPM_o_2_6, model_path="openbmb/MiniCPM-o-2_6"), +} + +xtuner_series = { + "llava-internlm2-7b": partial( + LLaVA_XTuner, + llm_path="internlm/internlm2-chat-7b", + llava_path="xtuner/llava-internlm2-7b", + visual_select_layer=-2, + prompt_template="internlm2_chat", + ), + "llava-internlm2-20b": partial( + LLaVA_XTuner, + llm_path="internlm/internlm2-chat-20b", + llava_path="xtuner/llava-internlm2-20b", + visual_select_layer=-2, + prompt_template="internlm2_chat", + ), + "llava-internlm-7b": partial( + LLaVA_XTuner, + llm_path="internlm/internlm-chat-7b", + llava_path="xtuner/llava-internlm-7b", + visual_select_layer=-2, + prompt_template="internlm_chat", + ), + "llava-v1.5-7b-xtuner": partial( + LLaVA_XTuner, + llm_path="lmsys/vicuna-7b-v1.5", + llava_path="xtuner/llava-v1.5-7b-xtuner", + visual_select_layer=-2, + prompt_template="vicuna", + ), + "llava-v1.5-13b-xtuner": partial( + LLaVA_XTuner, + llm_path="lmsys/vicuna-13b-v1.5", + llava_path="xtuner/llava-v1.5-13b-xtuner", + visual_select_layer=-2, + prompt_template="vicuna", + ), + "llava-llama-3-8b": partial( + LLaVA_XTuner, + llm_path="xtuner/llava-llama-3-8b-v1_1", + llava_path="xtuner/llava-llama-3-8b-v1_1", + visual_select_layer=-2, + prompt_template="llama3_chat", + ), +} + +qwen_series = { + "qwen_base": partial(QwenVL, model_path="Qwen/Qwen-VL"), + "qwen_chat": partial(QwenVLChat, model_path="Qwen/Qwen-VL-Chat"), + "monkey": partial(Monkey, model_path="echo840/Monkey"), + "monkey-chat": partial(MonkeyChat, model_path="echo840/Monkey-Chat"), + "minimonkey": partial(MiniMonkey, model_path="mx262/MiniMonkey"), +} + +llava_series = { + "llava_v1.5_7b": partial(LLaVA, model_path="liuhaotian/llava-v1.5-7b"), + "llava_v1.5_13b": partial(LLaVA, model_path="liuhaotian/llava-v1.5-13b"), + "llava_v1_7b": partial(LLaVA, model_path=LLAVA_V1_7B_MODEL_PTH), + "sharegpt4v_7b": partial(LLaVA, model_path="Lin-Chen/ShareGPT4V-7B"), + "sharegpt4v_13b": partial(LLaVA, model_path="Lin-Chen/ShareGPT4V-13B"), + "llava_next_vicuna_7b": partial( + LLaVA_Next, model_path="llava-hf/llava-v1.6-vicuna-7b-hf" + ), + "llava_next_vicuna_13b": partial( + LLaVA_Next, model_path="llava-hf/llava-v1.6-vicuna-13b-hf" + ), + "llava_next_mistral_7b": partial( + LLaVA_Next, model_path="llava-hf/llava-v1.6-mistral-7b-hf" + ), + "llava_next_yi_34b": partial(LLaVA_Next, model_path="llava-hf/llava-v1.6-34b-hf"), + "llava_next_llama3": partial( + LLaVA_Next, model_path="llava-hf/llama3-llava-next-8b-hf" + ), + "llava_next_72b": partial(LLaVA_Next, model_path="llava-hf/llava-next-72b-hf"), + "llava_next_110b": partial(LLaVA_Next, model_path="llava-hf/llava-next-110b-hf"), + "llava_next_qwen_32b": partial( + LLaVA_Next2, model_path="lmms-lab/llava-next-qwen-32b" + ), + 
"llava_next_interleave_7b": partial( + LLaVA_Next, model_path="llava-hf/llava-interleave-qwen-7b-hf" + ), + "llava_next_interleave_7b_dpo": partial( + LLaVA_Next, model_path="llava-hf/llava-interleave-qwen-7b-dpo-hf" + ), + "llava-onevision-qwen2-0.5b-ov-hf": partial( + LLaVA_OneVision_HF, model_path="llava-hf/llava-onevision-qwen2-0.5b-ov-hf" + ), + "llava-onevision-qwen2-0.5b-si-hf": partial( + LLaVA_OneVision_HF, model_path="llava-hf/llava-onevision-qwen2-0.5b-si-hf" + ), + "llava-onevision-qwen2-7b-ov-hf": partial( + LLaVA_OneVision_HF, model_path="llava-hf/llava-onevision-qwen2-7b-ov-hf" + ), + "llava-onevision-qwen2-7b-si-hf": partial( + LLaVA_OneVision_HF, model_path="llava-hf/llava-onevision-qwen2-7b-si-hf" + ), + "llava_onevision_qwen2_0.5b_si": partial( + LLaVA_OneVision, model_path="lmms-lab/llava-onevision-qwen2-0.5b-si" + ), + "llava_onevision_qwen2_7b_si": partial( + LLaVA_OneVision, model_path="lmms-lab/llava-onevision-qwen2-7b-si" + ), + "llava_onevision_qwen2_72b_si": partial( + LLaVA_OneVision, model_path="lmms-lab/llava-onevision-qwen2-72b-si" + ), + "llava_onevision_qwen2_0.5b_ov": partial( + LLaVA_OneVision, model_path="lmms-lab/llava-onevision-qwen2-0.5b-ov" + ), + "llava_onevision_qwen2_7b_ov": partial( + LLaVA_OneVision, model_path="lmms-lab/llava-onevision-qwen2-7b-ov" + ), + "llava_onevision_qwen2_72b_ov": partial( + LLaVA_OneVision, model_path="lmms-lab/llava-onevision-qwen2-72b-ov-sft" + ), + "Aquila-VL-2B": partial(LLaVA_OneVision, model_path="BAAI/Aquila-VL-2B-llava-qwen"), + "llava_video_qwen2_7b": partial( + LLaVA_OneVision, model_path="lmms-lab/LLaVA-Video-7B-Qwen2" + ), + "llava_video_qwen2_72b": partial( + LLaVA_OneVision, model_path="lmms-lab/LLaVA-Video-72B-Qwen2" + ), + "varco-vision-hf": partial( + LLaVA_OneVision_HF, model_path="NCSOFT/VARCO-VISION-14B-HF" + ), +} + +vita_series = { + "vita": partial(VITA, model_path="VITA-MLLM/VITA", root=VITA_ROOT), + "vita_qwen2": partial(VITAQwen2, model_path="VITA-MLLM/VITA-1.5", root=VITA_ROOT), +} + +long_vita_series = { + "Long-VITA-16K": partial( + LongVITA, model_path="VITA-MLLM/Long-VITA-16K_HF", max_num_frame=128 + ), + "Long-VITA-128K": partial( + LongVITA, model_path="VITA-MLLM/Long-VITA-128K_HF", max_num_frame=256 + ), + "Long-VITA-1M": partial( + LongVITA, model_path="VITA-MLLM/Long-VITA-1M_HF", max_num_frame=256 + ), +} + +internvl_series = { + "InternVL-Chat-V1-1": partial( + InternVLChat, model_path="OpenGVLab/InternVL-Chat-V1-1", version="V1.1" + ), + "InternVL-Chat-V1-2": partial( + InternVLChat, model_path="OpenGVLab/InternVL-Chat-V1-2", version="V1.2" + ), + "InternVL-Chat-V1-2-Plus": partial( + InternVLChat, model_path="OpenGVLab/InternVL-Chat-V1-2-Plus", version="V1.2" + ), + # InternVL1.5 series + "InternVL-Chat-V1-5": partial( + InternVLChat, + model_path="OpenGVLab/InternVL-Chat-V1-5", + version="V1.5", + ), + "Mini-InternVL-Chat-2B-V1-5": partial( + InternVLChat, model_path="OpenGVLab/Mini-InternVL-Chat-2B-V1-5", version="V1.5" + ), + "Mini-InternVL-Chat-4B-V1-5": partial( + InternVLChat, model_path="OpenGVLab/Mini-InternVL-Chat-4B-V1-5", version="V1.5" + ), + # InternVL2 series + "InternVL2-1B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2-1B", version="V2.0" + ), + "InternVL2-2B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2-2B", version="V2.0" + ), + "InternVL2-4B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2-4B", version="V2.0" + ), + "InternVL2-8B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2-8B", version="V2.0" + ), + 
"InternVL2-26B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2-26B", version="V2.0" + ), + "InternVL2-40B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2-40B", version="V2.0" + ), + "InternVL2-76B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2-Llama3-76B", version="V2.0" + ), + # InternVL2 MPO series + "InternVL2-8B-MPO": partial( + InternVLChat, model_path="OpenGVLab/InternVL2-8B-MPO", version="V2.0" + ), + "InternVL2-8B-MPO-CoT": partial( + InternVLChat, + model_path="OpenGVLab/InternVL2-8B-MPO", + version="V2.0", + use_mpo_prompt=True, + ), + # InternVL2.5 series + "InternVL2_5-1B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2_5-1B", version="V2.0" + ), + "InternVL2_5-2B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2_5-2B", version="V2.0" + ), + "InternVL2_5-4B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2_5-4B", version="V2.0" + ), + "InternVL2_5-8B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2_5-8B", version="V2.0" + ), + "InternVL2_5-26B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2_5-26B", version="V2.0" + ), + "InternVL2_5-38B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2_5-38B", version="V2.0" + ), + "InternVL2_5-78B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2_5-78B", version="V2.0" + ), + # InternVL2.5 series with Best-of-N evaluation + "InternVL2_5-8B-BoN-8": partial( + InternVLChat, model_path="OpenGVLab/InternVL2_5-8B", version="V2.0", + best_of_n=8, reward_model_path="OpenGVLab/VisualPRM-8B", + ), + # InternVL2.5-MPO series + "InternVL2_5-1B-MPO": partial( + InternVLChat, + model_path="OpenGVLab/InternVL2_5-1B-MPO", + version="V2.0", + use_mpo_prompt=True, + ), + "InternVL2_5-2B-MPO": partial( + InternVLChat, + model_path="OpenGVLab/InternVL2_5-2B-MPO", + version="V2.0", + use_mpo_prompt=True, + ), + "InternVL2_5-4B-MPO": partial( + InternVLChat, + model_path="OpenGVLab/InternVL2_5-4B-MPO", + version="V2.0", + use_mpo_prompt=True, + ), + "InternVL2_5-8B-MPO": partial( + InternVLChat, + model_path="OpenGVLab/InternVL2_5-8B-MPO", + version="V2.0", + use_mpo_prompt=True, + ), + "InternVL2_5-26B-MPO": partial( + InternVLChat, + model_path="OpenGVLab/InternVL2_5-26B-MPO", + version="V2.0", + use_mpo_prompt=True, + ), + "InternVL2_5-38B-MPO": partial( + InternVLChat, + model_path="OpenGVLab/InternVL2_5-38B-MPO", + version="V2.0", + use_mpo_prompt=True, + ), + "InternVL2_5-78B-MPO": partial( + InternVLChat, + model_path="OpenGVLab/InternVL2_5-78B-MPO", + version="V2.0", + use_mpo_prompt=True, + ), +} + +sail_series = { + "SAIL-VL-2B": partial(SailVL, model_path="BytedanceDouyinContent/SAIL-VL-2B") +} + +yivl_series = { + "Yi_VL_6B": partial(Yi_VL, model_path="01-ai/Yi-VL-6B", root=Yi_ROOT), + "Yi_VL_34B": partial(Yi_VL, model_path="01-ai/Yi-VL-34B", root=Yi_ROOT), +} + +xcomposer_series = { + "XComposer": partial(XComposer, model_path="internlm/internlm-xcomposer-vl-7b"), + "sharecaptioner": partial(ShareCaptioner, model_path="Lin-Chen/ShareCaptioner"), + "XComposer2": partial(XComposer2, model_path="internlm/internlm-xcomposer2-vl-7b"), + "XComposer2_1.8b": partial( + XComposer2, model_path="internlm/internlm-xcomposer2-vl-1_8b" + ), + "XComposer2_4KHD": partial( + XComposer2_4KHD, model_path="internlm/internlm-xcomposer2-4khd-7b" + ), + "XComposer2d5": partial( + XComposer2d5, model_path="internlm/internlm-xcomposer2d5-7b" + ), +} + +minigpt4_series = { + "MiniGPT-4-v2": partial(MiniGPT4, mode="v2", root=MiniGPT4_ROOT), + "MiniGPT-4-v1-7B": partial(MiniGPT4, 
mode="v1_7b", root=MiniGPT4_ROOT), + "MiniGPT-4-v1-13B": partial(MiniGPT4, mode="v1_13b", root=MiniGPT4_ROOT), +} + +idefics_series = { + "idefics_9b_instruct": partial( + IDEFICS, model_path="HuggingFaceM4/idefics-9b-instruct" + ), + "idefics_80b_instruct": partial( + IDEFICS, model_path="HuggingFaceM4/idefics-80b-instruct" + ), + "idefics2_8b": partial(IDEFICS2, model_path="HuggingFaceM4/idefics2-8b"), + # Idefics3 follows Idefics2 Pattern + "Idefics3-8B-Llama3": partial( + IDEFICS2, model_path="HuggingFaceM4/Idefics3-8B-Llama3" + ), +} + +smolvlm_series = { + "SmolVLM-256M": partial(SmolVLM, model_path="HuggingFaceTB/SmolVLM-256M-Instruct"), + "SmolVLM-500M": partial(SmolVLM, model_path="HuggingFaceTB/SmolVLM-500M-Instruct"), + "SmolVLM": partial(SmolVLM, model_path="HuggingFaceTB/SmolVLM-Instruct"), + "SmolVLM-DPO": partial(SmolVLM, model_path="HuggingFaceTB/SmolVLM-Instruct-DPO"), + "SmolVLM-Synthetic": partial(SmolVLM, model_path="HuggingFaceTB/SmolVLM-Synthetic"), + "SmolVLM2-256M": partial( + SmolVLM2, model_path="HuggingFaceTB/SmolVLM2-256M-Video-Instruct" + ), + "SmolVLM2-500M": partial( + SmolVLM2, model_path="HuggingFaceTB/SmolVLM2-500M-Video-Instruct" + ), + "SmolVLM2": partial(SmolVLM2, model_path="HuggingFaceTB/SmolVLM2-2.2B-Instruct"), +} + +instructblip_series = { + "instructblip_7b": partial(InstructBLIP, name="instructblip_7b"), + "instructblip_13b": partial(InstructBLIP, name="instructblip_13b"), +} + +deepseekvl_series = { + "deepseek_vl_7b": partial(DeepSeekVL, model_path="deepseek-ai/deepseek-vl-7b-chat"), + "deepseek_vl_1.3b": partial( + DeepSeekVL, model_path="deepseek-ai/deepseek-vl-1.3b-chat" + ), +} + +deepseekvl2_series = { + "deepseek_vl2_tiny": partial( + DeepSeekVL2, model_path="deepseek-ai/deepseek-vl2-tiny" + ), + "deepseek_vl2_small": partial( + DeepSeekVL2, model_path="deepseek-ai/deepseek-vl2-small" + ), + "deepseek_vl2": partial(DeepSeekVL2, model_path="deepseek-ai/deepseek-vl2"), +} + +janus_series = { + "Janus-1.3B": partial(Janus, model_path="deepseek-ai/Janus-1.3B"), + "Janus-Pro-7B": partial(Janus, model_path="deepseek-ai/Janus-Pro-7B"), +} + +cogvlm_series = { + "cogvlm-grounding-generalist": partial( + CogVlm, + model_path="THUDM/cogvlm-grounding-generalist-hf", + tokenizer_name="lmsys/vicuna-7b-v1.5", + ), + "cogvlm-chat": partial( + CogVlm, model_path="THUDM/cogvlm-chat-hf", tokenizer_name="lmsys/vicuna-7b-v1.5" + ), + "cogvlm2-llama3-chat-19B": partial( + CogVlm, model_path="THUDM/cogvlm2-llama3-chat-19B" + ), + "glm-4v-9b": partial(GLM4v, model_path="THUDM/glm-4v-9b"), +} + +wemm_series = { + "WeMM": partial(WeMM, model_path="feipengma/WeMM"), +} + +cambrian_series = { + "cambrian_8b": partial(Cambrian, model_path="nyu-visionx/cambrian-8b"), + "cambrian_13b": partial(Cambrian, model_path="nyu-visionx/cambrian-13b"), + "cambrian_34b": partial(Cambrian, model_path="nyu-visionx/cambrian-34b"), +} + +chameleon_series = { + "chameleon_7b": partial(Chameleon, model_path="facebook/chameleon-7b"), + "chameleon_30b": partial(Chameleon, model_path="facebook/chameleon-30b"), +} + +vila_series = { + "VILA1.5-3b": partial(VILA, model_path="Efficient-Large-Model/VILA1.5-3b"), + "Llama-3-VILA1.5-8b": partial( + VILA, model_path="Efficient-Large-Model/Llama-3-VILA1.5-8b" + ), + "VILA1.5-13b": partial(VILA, model_path="Efficient-Large-Model/VILA1.5-13b"), + "VILA1.5-40b": partial(VILA, model_path="Efficient-Large-Model/VILA1.5-40b"), +} + +ovis_series = { + "Ovis1.5-Llama3-8B": partial(Ovis, model_path="AIDC-AI/Ovis1.5-Llama3-8B"), + "Ovis1.5-Gemma2-9B": 
partial(Ovis, model_path="AIDC-AI/Ovis1.5-Gemma2-9B"), + "Ovis1.6-Gemma2-9B": partial(Ovis1_6, model_path="AIDC-AI/Ovis1.6-Gemma2-9B"), + "Ovis1.6-Llama3.2-3B": partial(Ovis1_6, model_path="AIDC-AI/Ovis1.6-Llama3.2-3B"), + "Ovis1.6-Gemma2-27B": partial( + Ovis1_6_Plus, model_path="AIDC-AI/Ovis1.6-Gemma2-27B" + ), + "Ovis2-1B": partial(Ovis2, model_path="AIDC-AI/Ovis2-1B"), + "Ovis2-2B": partial(Ovis2, model_path="AIDC-AI/Ovis2-2B"), + "Ovis2-4B": partial(Ovis2, model_path="AIDC-AI/Ovis2-4B"), + "Ovis2-8B": partial(Ovis2, model_path="AIDC-AI/Ovis2-8B"), + "Ovis2-16B": partial(Ovis2, model_path="AIDC-AI/Ovis2-16B"), + "Ovis2-34B": partial(Ovis2, model_path="AIDC-AI/Ovis2-34B"), +} + +mantis_series = { + "Mantis-8B-siglip-llama3": partial( + Mantis, model_path="TIGER-Lab/Mantis-8B-siglip-llama3" + ), + "Mantis-8B-clip-llama3": partial( + Mantis, model_path="TIGER-Lab/Mantis-8B-clip-llama3" + ), + "Mantis-8B-Idefics2": partial(Mantis, model_path="TIGER-Lab/Mantis-8B-Idefics2"), + "Mantis-8B-Fuyu": partial(Mantis, model_path="TIGER-Lab/Mantis-8B-Fuyu"), +} + +phi3_series = { + "Phi-3-Vision": partial( + Phi3Vision, model_path="microsoft/Phi-3-vision-128k-instruct" + ), + "Phi-3.5-Vision": partial( + Phi3_5Vision, model_path="microsoft/Phi-3.5-vision-instruct" + ), +} + +phi4_series = { + 'Phi-4-Vision': partial(Phi4Multimodal, model_path='microsoft/Phi-4-multimodal-instruct'), +} + +xgen_mm_series = { + "xgen-mm-phi3-interleave-r-v1.5": partial( + XGenMM, model_path="Salesforce/xgen-mm-phi3-mini-instruct-interleave-r-v1.5" + ), + "xgen-mm-phi3-dpo-r-v1.5": partial( + XGenMM, model_path="Salesforce/xgen-mm-phi3-mini-instruct-dpo-r-v1.5" + ), +} + +qwen2vl_series = { + "Qwen2-VL-7B-560": partial( + Qwen2VLChat, + model_path="/root/xuqixin/reinforce_perception/model/checkpoint-560", + min_pixels=3136, + max_pixels=12845056, + ), + "Qwen2-VL-7B-mix": partial( + Qwen2VLChat, + model_path="/root/kongly/project/Qwen-SFT/saves/qwen2_vl-7b/full/sft", + min_pixels=3136, + max_pixels=12845056, + use_reasoning_prompt=1, + use_vllm=True, + do_sample=True, + max_new_tokens=2048, + top_p=1.0, + top_k=-1, + temperature=1.0, + ), + "Qwen2-VL-7B-box-mix": partial( + Qwen2VLChat, + model_path="/root/kongly/project/Qwen-SFT/saves/qwen2_vl-7b/full/sft-box", + min_pixels=3136, + max_pixels=12845056, + use_reasoning_prompt=1, + use_vllm=True, + do_sample=True, + max_new_tokens=2048, + top_p=1.0, + top_k=-1, + temperature=1.0, + ), + "Qwen2-VL-7B-GRPO-2000": partial( + Qwen2VLChat, + model_path="/root/xuqixin/reinforce_perception/model/r1_distilled_grpo_step2500", + min_pixels=3136, + max_pixels=12845056, + use_reasoning_prompt=0, + use_vllm=True, + do_sample=True, + max_new_tokens=2048, + top_p=1.0, + top_k=-1, + temperature=1.0, + ), + "Qwen2-VL-7B-GRPO-2000-Prompt": partial( + Qwen2VLChat, + model_path="/root/xuqixin/reinforce_perception/model/r1_distilled_grpo_step2500", + min_pixels=3136, + max_pixels=12845056, + use_reasoning_prompt=2, + use_vllm=True, + do_sample=True, + max_new_tokens=2048, + top_p=1.0, + top_k=-1, + temperature=1.0, + system_prompt="You are a helpful assistant.", + ), + "Qwen2-VL-7B-GRPO-2000-HF": partial( + Qwen2VLChat, + model_path="/root/xuqixin/reinforce_perception/model/r1_distilled_grpo_step2500", + min_pixels=3136, + max_pixels=12845056, + use_reasoning_prompt=1, + use_vllm=False, + do_sample=True, + max_new_tokens=2048, + top_p=1.0, + top_k=100, + temperature=1.0, + ), + "Qwen-VL-Max-0809": partial( + Qwen2VLAPI, + model="qwen-vl-max-0809", + min_pixels=1280 * 28 * 28, + 
max_pixels=16384 * 28 * 28, + ), + "Qwen-VL-Plus-0809": partial( + Qwen2VLAPI, + model="qwen-vl-plus-0809", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "QVQ-72B-Preview": partial( + Qwen2VLChat, + model_path="Qwen/QVQ-72B-Preview", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + system_prompt="You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step.", + max_new_tokens=8192, + post_process=False, + ), + "Qwen2-VL-72B-Instruct": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2-VL-72B-Instruct", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen2-VL-7B-Instruct": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2-VL-7B-Instruct", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen2-VL-7B-Instruct-AWQ": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2-VL-7B-Instruct-AWQ", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen2-VL-7B-Instruct-GPTQ-Int4": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2-VL-7B-Instruct-GPTQ-Int4", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen2-VL-7B-Instruct-GPTQ-Int8": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2-VL-7B-Instruct-GPTQ-Int8", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen2-VL-2B-Instruct": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2-VL-2B-Instruct", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen2-VL-2B-Instruct-AWQ": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2-VL-2B-Instruct-AWQ", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen2-VL-2B-Instruct-GPTQ-Int4": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int4", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen2-VL-2B-Instruct-GPTQ-Int8": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int8", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "XinYuan-VL-2B-Instruct": partial( + Qwen2VLChat, + model_path="Cylingo/Xinyuan-VL-2B", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen2.5-VL-3B-Instruct": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2.5-VL-3B-Instruct", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + ), + "Qwen2.5-VL-3B-Instruct-AWQ": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2.5-VL-3B-Instruct-AWQ", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + ), + "Qwen2.5-VL-7B-Instruct-original": partial( + Qwen2VLChat, + model_path="/user/yaoshu/models/Qwen_25_VL", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + use_vllm=True, + ), + "Qwen2.5-VL-7B-RL": partial( + Qwen2VLChat, + model_path="/user/xuqixin/checkpoints/qwen25_vl-7b/grpo_v7_exp0_qwen25vl_grpo_opensource_math_onlinefilter_regen/global_step_300/actor/huggingface", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + use_vllm=True, + use_reasoning_prompt=2, + do_sample=True, + temperature=1, + top_p=1, + top_k=-1, + ), + "Qwen2.5-VL-7B-RL-greedy": partial( + Qwen2VLChat, + model_path="/user/xuqixin/checkpoints/qwen25_vl-7b/grpo_v7_exp0_qwen25vl_grpo_opensource_math_onlinefilter_regen/global_step_300/actor/huggingface", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + use_vllm=True, + use_reasoning_prompt=2, + do_sample=True + ), + "Qwen2.5-VL-7B-RL-raw-greedy": partial( + Qwen2VLChat, + 
model_path="/user/xuqixin/checkpoints/qwen25_vl-7b/grpo_v7_exp0_qwen25vl_grpo_opensource_math_onlinefilter_regen/global_step_300/actor/huggingface", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + use_vllm=True, + do_sample=True + ), + "Qwen2.5-VL-7B-Instruct-AWQ": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2.5-VL-7B-Instruct-AWQ", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + ), + "Qwen2.5-VL-72B-Instruct": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2.5-VL-72B-Instruct", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + ), + "Qwen2.5-VL-72B-Instruct-AWQ": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2.5-VL-72B-Instruct-AWQ", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + ), + "VLM-R1": partial( + VLMR1Chat, + model_path="omlab/VLM-R1-Qwen2.5VL-3B-Math-0305", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + ), +} + +slime_series = { + "Slime-7B": partial(SliME, model_path="yifanzhang114/SliME-vicuna-7B"), + "Slime-8B": partial(SliME, model_path="yifanzhang114/SliME-Llama3-8B"), + "Slime-13B": partial(SliME, model_path="yifanzhang114/SliME-vicuna-13B"), +} + +eagle_series = { + "Eagle-X4-8B-Plus": partial(Eagle, model_path="NVEagle/Eagle-X4-8B-Plus"), + "Eagle-X4-13B-Plus": partial(Eagle, model_path="NVEagle/Eagle-X4-13B-Plus"), + "Eagle-X5-7B": partial(Eagle, model_path="NVEagle/Eagle-X5-7B"), + "Eagle-X5-13B": partial(Eagle, model_path="NVEagle/Eagle-X5-13B"), + "Eagle-X5-13B-Chat": partial(Eagle, model_path="NVEagle/Eagle-X5-13B-Chat"), + "Eagle-X5-34B-Chat": partial(Eagle, model_path="NVEagle/Eagle-X5-34B-Chat"), + "Eagle-X5-34B-Plus": partial(Eagle, model_path="NVEagle/Eagle-X5-34B-Plus"), +} + +moondream_series = { + "Moondream1": partial(Moondream1, model_path="vikhyatk/moondream1"), + "Moondream2": partial(Moondream2, model_path="vikhyatk/moondream2"), +} + +llama_series = { + "Llama-3.2-11B-Vision-Instruct": partial( + llama_vision, model_path="meta-llama/Llama-3.2-11B-Vision-Instruct" + ), + "LLaVA-CoT": partial(llama_vision, model_path="Xkev/Llama-3.2V-11B-cot"), + "Llama-3.2-90B-Vision-Instruct": partial( + llama_vision, model_path="meta-llama/Llama-3.2-90B-Vision-Instruct" + ), +} + +molmo_series = { + "molmoE-1B-0924": partial(molmo, model_path="allenai/MolmoE-1B-0924"), + "molmo-7B-D-0924": partial(molmo, model_path="allenai/Molmo-7B-D-0924"), + "molmo-7B-O-0924": partial(molmo, model_path="allenai/Molmo-7B-O-0924"), + "molmo-72B-0924": partial(molmo, model_path="allenai/Molmo-72B-0924"), +} + +kosmos_series = { + "Kosmos2": partial(Kosmos2, model_path="microsoft/kosmos-2-patch14-224") +} + +points_series = { + "POINTS-Yi-1.5-9B-Chat": partial( + POINTS, model_path="WePOINTS/POINTS-Yi-1-5-9B-Chat" + ), + "POINTS-Qwen-2.5-7B-Chat": partial( + POINTS, model_path="WePOINTS/POINTS-Qwen-2-5-7B-Chat" + ), + "POINTSV15-Qwen-2.5-7B-Chat": partial( + POINTSV15, model_path="WePOINTS/POINTS-1-5-Qwen-2-5-7B-Chat" + ), +} + +nvlm_series = { + "NVLM": partial(NVLM, model_path="nvidia/NVLM-D-72B"), +} + +vintern_series = { + "Vintern-3B-beta": partial(VinternChat, model_path="5CD-AI/Vintern-3B-beta"), + "Vintern-1B-v2": partial(VinternChat, model_path="5CD-AI/Vintern-1B-v2"), +} + +aria_series = {"Aria": partial(Aria, model_path="rhymes-ai/Aria")} + +h2ovl_series = { + "h2ovl-mississippi-2b": partial(H2OVLChat, model_path="h2oai/h2ovl-mississippi-2b"), + "h2ovl-mississippi-1b": 
partial( + H2OVLChat, model_path="h2oai/h2ovl-mississippi-800m" + ), +} + +valley_series = { + "valley_eagle": partial( + ValleyEagleChat, model_path="bytedance-research/Valley-Eagle-7B" + ), +} + +ola_series = { + "ola": partial(Ola, model_path="THUdyh/Ola-7b"), +} + +ross_series = { + "ross-qwen2-7b": partial(Ross, model_path="HaochenWang/ross-qwen2-7b"), +} + +ursa_series = {"URSA-8B": partial(UrsaChat, model_path="URSA-MATH/URSA-8B")} + +gemma_series = { + "paligemma-3b-mix-448": partial( + PaliGemma, model_path="google/paligemma-3b-mix-448" + ), + 'Gemma3-4B': partial(Gemma3, model_path='google/gemma-3-4b-it'), + 'Gemma3-12B': partial(Gemma3, model_path='google/gemma-3-12b-it'), + 'Gemma3-27B': partial(Gemma3, model_path='google/gemma-3-27b-it') +} + +supported_VLM = {} + +model_groups = [ + ungrouped, + o1_apis, + api_models, + xtuner_series, + qwen_series, + llava_series, + internvl_series, + yivl_series, + xcomposer_series, + minigpt4_series, + idefics_series, + instructblip_series, + deepseekvl_series, + deepseekvl2_series, + janus_series, + minicpm_series, + cogvlm_series, + wemm_series, + cambrian_series, + chameleon_series, + video_models, + ovis_series, + vila_series, + mantis_series, + mmalaya_series, + phi3_series, + phi4_series, + xgen_mm_series, + qwen2vl_series, + slime_series, + eagle_series, + moondream_series, + llama_series, + molmo_series, + kosmos_series, + points_series, + nvlm_series, + vintern_series, + h2ovl_series, + aria_series, + smolvlm_series, + sail_series, + valley_series, + vita_series, + ross_series, + emu_series, + ola_series, + ursa_series, + gemma_series, + long_vita_series, +] + +for grp in model_groups: + supported_VLM.update(grp) diff --git a/vlmeval/dataset/Omnidocbench/__init__.py b/vlmeval/dataset/Omnidocbench/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vlmeval/dataset/Omnidocbench/data_preprocess.py b/vlmeval/dataset/Omnidocbench/data_preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..28aff6b31c50232fd9931bf5e3a541aaf50e1b0a --- /dev/null +++ b/vlmeval/dataset/Omnidocbench/data_preprocess.py @@ -0,0 +1,447 @@ +import re +import unicodedata +from pylatexenc.latex2text import LatexNodes2Text +from bs4 import BeautifulSoup +import subprocess +import shutil +import uuid +import html +import os + +def remove_markdown_fences(content): + content = re.sub(r'^```markdown\n?', '', content, flags=re.MULTILINE) + content = re.sub(r'```\n?$', '', content, flags=re.MULTILINE) + return content + +# Standardize all consecutive characters +def replace_repeated_chars(input_str): + input_str = re.sub(r'_{4,}', '____', input_str) # Replace more than 4 consecutive underscores with 4 underscores + input_str = re.sub(r' {4,}', ' ', input_str) # Replace more than 4 consecutive spaces with 4 spaces + return re.sub(r'([^a-zA-Z0-9])\1{10,}', r'\1\1\1\1', input_str) # For other consecutive symbols (except numbers and letters), replace more than 10 occurrences with 4 + +# Special Unicode handling +def fullwidth_to_halfwidth(s): + result = [] + for char in s: + code = ord(char) + # Convert full-width space to half-width space + if code == 0x3000: + code = 0x0020 + # Convert other full-width characters to half-width + elif 0xFF01 <= code <= 0xFF5E: + code -= 0xFEE0 + result.append(chr(code)) + return ''.join(result) + +def find_special_unicode(s): + special_chars = {} + for char in s: + if ord(char) > 127: # Non-ASCII characters + # unicode_name = 
unicodedata.name(char, None) + unicode_name = unicodedata.category(char) + special_chars[char] = f'U+{ord(char):04X} ({unicode_name})' + return special_chars + +# # Define dictionary for Unicode character replacements +# unicode_replacements = { +# "\u00A9": r"$\copyright$", # Copyright symbol © to latex +# "\u00AE": r"$^\circledR$", # Registered trademark ® to latex +# "\u2122": r"$^\text{TM}$", # Trademark ™ to latex +# "\u2018": "'", # Left single quote to straight quote +# "\u2019": "'", # Right single quote to straight quote +# "\u201C": "\"", # Left double quote to straight quote +# "\u201D": "\"", # Right double quote to straight quote +# "\u2013": "-", # En dash to hyphen +# "\u2014": "-", # Em dash to hyphen +# "\u2026": "...", # Unicode ellipsis to three dots +# "\u2103": r"$\textdegree C$", # ℃ +# "\u03B1": r"$\alpha$", # α +# "\u03B2": r"$\beta$", # β +# "\u03A3": r"$\Sigma$", # Σ +# } + +# # Use regex to replace Unicode characters +# def replace_unicode(match): +# char = match.group(0) +# return unicode_replacements.get(char, char) + +inline_reg = re.compile( + r'\$(.*?)\$|' + r'\\\((.*?)\\\)', +) + +def textblock2unicode(text): + inline_matches = inline_reg.finditer(text) + removal_positions = [] + for match in inline_matches: + position = [match.start(), match.end()] + content = match.group(1) if match.group(1) is not None else match.group(2) + # print('-------- content-------', content) + # Remove escape characters \ + clean_content = re.sub(r'\\([\\_&%^])', '', content) + + try: + if any(char in clean_content for char in r'\^_'): + if clean_content.endswith('\\'): + clean_content += ' ' + # inline_array.append(match.group(0)) + unicode_content = LatexNodes2Text().latex_to_text(clean_content) + removal_positions.append((position[0], position[1], unicode_content)) + except: + continue + + # Remove inline formulas from original text + for start, end, unicode_content in sorted(removal_positions, reverse=True): + text = text[:start] + unicode_content.strip() + text[end:] + + return text + +def normalized_formula(text): + # Normalize math formulas before matching + filter_list = ['\\mathbf', '\\mathrm', '\\mathnormal', '\\mathit', '\\mathbb', '\\mathcal', '\\mathscr', '\\mathfrak', '\\mathsf', '\\mathtt', + '\\textbf', '\\text', '\\boldmath', '\\boldsymbol', '\\operatorname', '\\bm', + '\\symbfit', '\\mathbfcal', '\\symbf', '\\scriptscriptstyle', '\\notag', + '\\setlength', '\\coloneqq', '\\space', '\\thickspace', '\\thinspace', '\\medspace', '\\nobreakspace', '\\negmedspace', + '\\quad', '\\qquad', '\\enspace', '\\substackw', ' '] + # '\\left', '\\right', '{', '}', ' '] + + # delimiter_filter + pattern = re.compile(r"\\\[(.+?)(?]*>(.*)' + tables = re.findall(pattern, table_res, re.DOTALL | re.IGNORECASE) + table_res = ''.join(tables) + # table_res = re.sub('','',table_res) + table_res = re.sub('( style=".*?")', "", table_res) + table_res = re.sub('( height=".*?")', "", table_res) + table_res = re.sub('( width=".*?")', "", table_res) + table_res = re.sub('( align=".*?")', "", table_res) + table_res = re.sub('( class=".*?")', "", table_res) + table_res = re.sub('',"",table_res) + + table_res = re.sub(r'\s+', " ", table_res) + table_res_no_space = '' + table_res.replace(' ','') + '
'
+ # table_res_no_space = re.sub(' (style=".*?")',"",table_res_no_space)
+ # table_res_no_space = re.sub(r'[ ]', " ", table_res_no_space)
+ table_res_no_space = re.sub('colspan="', ' colspan="', table_res_no_space)
+ table_res_no_space = re.sub('rowspan="', ' rowspan="', table_res_no_space)
+ table_res_no_space = re.sub('border="', ' border="', table_res_no_space)
+
+ table_res = '<table>' + table_res + '</table>'
+ # table_flow.append(table_res)
+ # table_flow_no_space.append(table_res_no_space)
+
+ return table_res, table_res_no_space
+
+ def clean_table(input_str,flag=True):
+ if flag:
+ input_str = input_str.replace('', '').replace('', '')
+ input_str = input_str.replace('', '').replace('', '')
+ input_str = input_str.replace('', '').replace('', '')
+ input_str = input_str.replace('
', '').replace('
', '') + input_str = input_str.replace('

', '').replace('

', '') + input_str = input_str.replace('', '') + input_str = re.sub('.*?','',input_str) + return input_str + + norm_text, _ = process_table_html(text) + norm_text = clean_table(norm_text) + return norm_text + +def normalized_latex_table(text): + def latex_template(latex_code): + template = r''' + \documentclass[border=20pt]{article} + \usepackage{subcaption} + \usepackage{url} + \usepackage{graphicx} + \usepackage{caption} + \usepackage{multirow} + \usepackage{booktabs} + \usepackage{color} + \usepackage{colortbl} + \usepackage{xcolor,soul,framed} + \usepackage{fontspec} + \usepackage{amsmath,amssymb,mathtools,bm,mathrsfs,textcomp} + \setlength{\parindent}{0pt}''' + \ + r''' + \begin{document} + ''' + \ + latex_code + \ + r''' + \end{document}''' + + return template + + def process_table_latex(latex_code): + SPECIAL_STRINGS= [ + ['\\\\vspace\\{.*?\\}', ''], + ['\\\\hspace\\{.*?\\}', ''], + ['\\\\rule\{.*?\\}\\{.*?\\}', ''], + ['\\\\addlinespace\\[.*?\\]', ''], + ['\\\\addlinespace', ''], + ['\\\\renewcommand\\{\\\\arraystretch\\}\\{.*?\\}', ''], + ['\\\\arraystretch\\{.*?\\}', ''], + ['\\\\(row|column)?colors?\\{[^}]*\\}(\\{[^}]*\\}){0,2}', ''], + ['\\\\color\\{.*?\\}', ''], + ['\\\\textcolor\\{.*?\\}', ''], + ['\\\\rowcolor(\\[.*?\\])?\\{.*?\\}', ''], + ['\\\\columncolor(\\[.*?\\])?\\{.*?\\}', ''], + ['\\\\cellcolor(\\[.*?\\])?\\{.*?\\}', ''], + ['\\\\colorbox\\{.*?\\}', ''], + ['\\\\(tiny|scriptsize|footnotesize|small|normalsize|large|Large|LARGE|huge|Huge)', ''], + [r'\s+', ' '], + ['\\\\centering', ''], + ['\\\\begin\\{table\\}\\[.*?\\]', '\\\\begin{table}'], + ['\t', ''], + ['@{}', ''], + ['\\\\toprule(\\[.*?\\])?', '\\\\hline'], + ['\\\\bottomrule(\\[.*?\\])?', '\\\\hline'], + ['\\\\midrule(\\[.*?\\])?', '\\\\hline'], + ['p\\{[^}]*\\}', 'l'], + ['m\\{[^}]*\\}', 'c'], + ['\\\\scalebox\\{[^}]*\\}\\{([^}]*)\\}', '\\1'], + ['\\\\textbf\\{([^}]*)\\}', '\\1'], + ['\\\\textit\\{([^}]*)\\}', '\\1'], + ['\\\\cmidrule(\\[.*?\\])?\\(.*?\\)\\{([0-9]-[0-9])\\}', '\\\\cline{\\2}'], + ['\\\\hline', ''], + [r'\\multicolumn\{1\}\{[^}]*\}\{((?:[^{}]|(?:\{[^{}]*\}))*)\}', r'\1'] + ] + pattern = r'\\begin\{tabular\}.*\\end\{tabular\}' # 注意这里不用 .*? + matches = re.findall(pattern, latex_code, re.DOTALL) + latex_code = ' '.join(matches) + + for special_str in SPECIAL_STRINGS: + latex_code = re.sub(fr'{special_str[0]}', fr'{special_str[1]}', latex_code) + + return latex_code + + def convert_latex_to_html(latex_content, cache_dir='./temp'): + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + + uuid_str = str(uuid.uuid1()) + with open(f'{cache_dir}/{uuid_str}.tex', 'w') as f: + f.write(latex_template(latex_content)) + + cmd = ['latexmlc', '--quiet', '--nocomments', f'--log={cache_dir}/{uuid_str}.log', + f'{cache_dir}/{uuid_str}.tex', f'--dest={cache_dir}/{uuid_str}.html'] + try: + subprocess.run(cmd, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + with open(f'{cache_dir}/{uuid_str}.html', 'r') as f: + html_content = f.read() + + pattern = r']*>(.*)' + tables = re.findall(pattern, html_content, re.DOTALL | re.IGNORECASE) + tables = [f'{table}
' for table in tables] + html_content = '\n'.join(tables) + + except Exception as e: + html_content = '' + + shutil.rmtree(cache_dir) + return html_content + + html_text = convert_latex_to_html(text) + normlized_tables = normalized_html_table(html_text) + return normlized_tables + + +def normalized_table(text, format='html'): + if format not in ['html', 'latex']: + raise ValueError('Invalid format: {}'.format(format)) + else: + return globals()['normalized_{}_table'.format(format)](text) + + +def textblock_with_norm_formula(text): + inline_matches = inline_reg.finditer(text) + removal_positions = [] + for match in inline_matches: + position = [match.start(), match.end()] + content = match.group(1) if match.group(1) is not None else match.group(2) + # print('-------- content-------', content) + + norm_content = normalized_formula(content) + removal_positions.append((position[0], position[1], norm_content)) + + # Remove inline formulas from original text + for start, end, norm_content in sorted(removal_positions, reverse=True): + text = text[:start] + norm_content.strip() + text[end:] + + return text + +# def inline_filter_unicode(text): +# # Ensure text is string type +# if not isinstance(text, str): +# text = str(text) + +# # Convert LaTeX content to Unicode representation +# text = LatexNodes2Text().latex_to_text(text) + +# inline_array = [] +# inline_matches = inline_reg.finditer(text) + +# for match in inline_matches: +# position = [match.start(), match.end()] +# content = match.group(1) if match.group(1) is not None else match.group(2) + +# # Remove escape characters \ +# clean_content = re.sub(r'\\([\\_&%^])', '', content) + +# if any(char in clean_content for char in r'\^_'): +# # inline_array.append(match.group(0)) +# inline_array.append({ +# 'category_type': 'equation_inline', +# 'position': position, +# 'content': match.group(0), +# }) +# text = text.replace(match.group(0), '') +# # print('-----Found inline formula: ', match.group(0)) +# else: +# text = text.replace(match.group(0), content) +# # # Add to inline_array +# # inline_array.append({ +# # 'category_type': 'equation_inline', +# # 'position': position, +# # 'content': content, +# # }) + +# # # Remove matched formula from original text, can choose to replace with spaces or remove directly +# # text = text[:position[0]] + ' '*(position[1]-position[0]) + text[position[1]:] + +# return text, inline_array + +def inline_filter_unicode(text): + # Ensure text is string type + if not isinstance(text, str): + text = str(text) + + # Replace inline formula boundary markers + #print('--------text-------',text) + placeholder = '__INLINE_FORMULA_BOUNDARY__' + text_copy = text.replace('$', placeholder).replace('\\(', placeholder).replace('\\)', placeholder) + #print('--------text_copy-------',text_copy) + # Convert LaTeX content to Unicode representation + text_copy = LatexNodes2Text().latex_to_text(text_copy) + #print('--------text_copy---unicode----',text_copy) + # Restore boundary markers + text_copy = text_copy.replace(placeholder, '$') + + inline_array = [] + inline_matches = inline_reg.finditer(text_copy) + # Record positions of inline formulas to be removed + removal_positions = [] + + for match in inline_matches: + position = [match.start(), match.end()] + content = match.group(1) if match.group(1) is not None else match.group(2) + print('-------- content-------', content) + # Remove escape characters \ + clean_content = re.sub(r'\\([\\_&%^])', '', content) + + if any(char in clean_content for char in r'\^_'): + # 
inline_array.append(match.group(0)) + inline_array.append({ + 'category_type': 'equation_inline', + 'position': position, + 'content': content, + }) + removal_positions.append((position[0], position[1])) + + # Remove inline formulas from original text + for start, end in sorted(removal_positions, reverse=True): + text = text[:start] + text[end:] + + return text, inline_array + +def inline_filter(text): + # Ensure text is string type + if not isinstance(text, str): + text = str(text) + + inline_array = [] + inline_matches = inline_reg.finditer(text) + + for match in inline_matches: + position = [match.start(), match.end()] + content = match.group(1) if match.group(1) is not None else match.group(2) + # print('inline_content: ', content) + + # Remove escape characters \ + clean_content = re.sub(r'\\([\\_&%^])', '', content) + + if any(char in clean_content for char in r'\^_'): + # inline_array.append(match.group(0)) + inline_array.append({ + 'category_type': 'equation_inline', + 'position': position, + 'content': match.group(0), + }) + text = text.replace(match.group(0), '') + # print('-----Found inline formula: ', match.group(0)) + else: + text = text.replace(match.group(0), content) + + return text, inline_array + +# Text OCR quality check processing: +def clean_string(input_string): + # Use regex to keep Chinese characters, English letters and numbers + input_string = input_string.replace('\\t', '').replace('\\n', '').replace('\t', '').replace('\n', '').replace('/t', '').replace('/n', '') + cleaned_string = re.sub(r'[^\w\u4e00-\u9fff]', '', input_string) + return cleaned_string \ No newline at end of file diff --git a/vlmeval/dataset/Omnidocbench/metrics.py b/vlmeval/dataset/Omnidocbench/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..cb9761d3463bd89c247196a4cd2f227defc1e136 --- /dev/null +++ b/vlmeval/dataset/Omnidocbench/metrics.py @@ -0,0 +1,490 @@ +import json +import time +import Levenshtein +import evaluate +import random +import pdb +import copy +import pandas as pd + +from .utils import save_paired_result,normalized_table +from collections import defaultdict +from apted.helpers import Tree +from apted import APTED, Config +from lxml import etree, html +from collections import deque +from tqdm import tqdm +from collections import defaultdict +from tabulate import tabulate + +def show_result(results): + for metric_name in results.keys(): + print(f'{metric_name}:') + score_table = [[k,v] for k,v in results[metric_name].items()] + print(tabulate(score_table)) + print('='*100) + +def sort_nested_dict(d): + # If it's a dictionary, recursively sort it + if isinstance(d, dict): + # Sort the current dictionary + sorted_dict = {k: sort_nested_dict(v) for k, v in sorted(d.items())} + return sorted_dict + # If not a dictionary, return directly + return d + +def get_full_labels_results(samples:dict): + if not samples: + return {} + label_group_dict = defaultdict(lambda: defaultdict(list)) + for sample in samples: + label_list = [] + if not sample.get("gt_attribute"): + continue + for anno in sample["gt_attribute"]: + for k,v in anno.items(): + label_list.append(k+": "+str(v)) + for label_name in list(set(label_list)): # Currently if there are merged cases, calculate based on the set of all labels involved after merging + for metric, score in sample['metric'].items(): + label_group_dict[label_name][metric].append(score) + + print('----Anno Attribute---------------') + result = {} + result['sample_count'] = {} + for attribute in label_group_dict.keys(): + for 
metric, scores in label_group_dict[attribute].items(): + mean_score = sum(scores) / len(scores) + if not result.get(metric): + result[metric] = {} + result[metric][attribute] = mean_score + result['sample_count'][attribute] = len(scores) + result = sort_nested_dict(result) + show_result(result) + return result + + +def get_page_split(samples, page_info): # Page level metric + if not page_info: + return {} + result_list = defaultdict(list) + + + for sample in samples: + img_name = sample['img_id'] if sample['img_id'].endswith('.jpg') else '_'.join(sample['img_id'].split('_')[:-1]) + page_info_s = page_info[img_name] + if not sample.get('metric'): + continue + for metric, score in sample['metric'].items(): + gt = sample['norm_gt'] if sample.get('norm_gt') else sample['gt'] + pred = sample['norm_pred'] if sample.get('norm_pred') else sample['pred'] + result_list[metric].append({ + 'image_name': img_name, + 'metric': metric, + 'attribute': 'ALL', + 'score': score, + 'upper_len': max(len(gt), len(pred)) + }) + for k,v in page_info_s.items(): + if isinstance(v, list): # special issue + for special_issue in v: + if 'table' not in special_issue: # Table-related special fields have duplicates + result_list[metric].append({ + 'image_name': img_name, + 'metric': metric, + 'attribute': special_issue, + 'score': score, + 'upper_len': max(len(gt), len(pred)) + }) + else: + result_list[metric].append({ + 'image_name': img_name, + 'metric': metric, + 'attribute': k+": "+str(v), + 'score': score, + 'upper_len': max(len(gt), len(pred)) + }) + + # Page level logic, accumulation is only done within pages, and mean operation is performed between pages + result = {} + if result_list.get('Edit_dist'): + df = pd.DataFrame(result_list['Edit_dist']) + up_total_avg = df.groupby(["image_name", "attribute"]).apply(lambda x: (x["score"]*x['upper_len']).sum() / x['upper_len'].sum()).groupby('attribute').mean() # At page level, accumulate edits, denominator is sum of max(gt, pred) from each sample + result['Edit_dist'] = up_total_avg.to_dict() + for metric in result_list.keys(): + if metric == 'Edit_dist': + continue + df = pd.DataFrame(result_list[metric]) + page_avg = df.groupby(["image_name", "attribute"]).apply(lambda x: x["score"].mean()).groupby('attribute').mean() + result[metric] = page_avg.to_dict() + + result = sort_nested_dict(result) + # print('----Page Attribute---------------') + show_result(result) + return result + + +def get_groups(samples, group_info): + group_samples = defaultdict(list) + for sample in samples: + group_samples['all'].append(sample) + for group in group_info: + select_flag = True + for k, v in group.items(): + for gt_attribute in sample['gt_attribute']: # gt_attribute is a list containing all merged gt attributes + if not gt_attribute: # if no GT attributes, don't include in calculation + select_flag = False + elif gt_attribute[k] != v: # if any gt attribute doesn't meet criteria, don't select + select_flag = False + if select_flag: + group_samples[str(group)].append(sample) + return group_samples + + +class Registry: + def __init__(self): + self._registry = {} + def register(self, name): + def decorator(item): + if name in self._registry: + raise ValueError(f"Item {name} already registered.") + self._registry[name] = item + return item + return decorator + def get(self, name): + if name not in self._registry: + raise ValueError(f"Item {name} not found in registry.") + return self._registry[name] + def list_items(self): + return list(self._registry.keys()) + +METRIC_REGISTRY = 
Registry() + + +@METRIC_REGISTRY.register("TEDS") +class call_TEDS(): + def __init__(self, samples): + self.samples = samples + def evaluate(self, group_info=[], save_name='default'): + teds = TEDS(structure_only=False) + teds_structure_only = TEDS(structure_only=True) + + group_scores = defaultdict(list) + group_scores_structure_only = defaultdict(list) + + samples = self.samples + for sample in samples: + gt = sample['norm_gt'] if sample.get('norm_gt') else sample['gt'] + pred = sample['norm_pred'] if sample.get('norm_pred') else sample['pred'] + + score = teds.evaluate(pred, gt) + score_structure_only = teds_structure_only.evaluate(pred, gt) + # print('TEDS score:', score) + group_scores['all'].append(score) + group_scores_structure_only['all'].append(score_structure_only) + + if not sample.get('metric'): + sample['metric'] = {} + sample['metric']['TEDS'] = score + sample['metric']['TEDS_structure_only'] = score_structure_only + + for group in group_info: + select_flag = True + for k, v in group.items(): + for gt_attribute in sample['gt_attribute']: # gt_attribute is a list containing all merged gt attributes + if not gt_attribute: # if no GT attributes, don't include in calculation + select_flag = False + elif gt_attribute[k] != v: # if any gt attribute doesn't meet criteria, don't select + select_flag = False + if select_flag: + group_scores[str(group)].append(score) + + result = {} + for group_name, scores in group_scores.items(): + if len(scores) > 0: + result[group_name] = sum(scores) / len(scores) # average of normalized scores at sample level + else: + result[group_name] = 'NaN' + print(f'Warning: Empyty matched samples for {group_name}.') + + structure_only_result = {} + for group_name, scores in group_scores_structure_only.items(): + if len(scores) > 0: + structure_only_result[group_name] = sum(scores) / len(scores) # average of normalized scores at sample level + else: + structure_only_result[group_name] = 'NaN' + print(f'Warning: Empyty matched samples for {group_name}.') + + return samples,{'TEDS': result, 'TEDS_structure_only': structure_only_result} + + +@METRIC_REGISTRY.register("BLEU") +class call_BLEU(): + def __init__(self, samples): + self.samples = samples + def evaluate(self, group_info=[], save_name='default'): + group_samples = get_groups(self.samples, group_info) + result = {} + bleu = evaluate.load("bleu", keep_in_memory=True, experiment_id=random.randint(1,1e8)) + + for group_name, samples in group_samples.items(): + predictions, references = [], [] + for sample in samples: + gt = sample['norm_gt'] if sample.get('norm_gt') else sample['gt'] + pred = sample['norm_pred'] if sample.get('norm_pred') else sample['pred'] + predictions.append(pred) + references.append(gt) + + if not predictions or not any(predictions) or not references or not any(references): + bleu_score = 0 + else: + try: + bleu_results = bleu.compute(predictions=predictions, references=references) + bleu_score = bleu_results["bleu"] + except ZeroDivisionError: + bleu_score = 0 + + result[group_name] = bleu_score + + return self.samples,{'BLEU': result} + +@METRIC_REGISTRY.register("METEOR") +class call_METEOR(): + def __init__(self, samples): + self.samples = samples + def evaluate(self, group_info=[], save_name='default'): + group_samples = get_groups(self.samples, group_info) + result = {} + for group_name, samples in group_samples.items(): + predictions, references = [], [] + for sample in samples: + gt = sample['norm_gt'] if sample.get('norm_gt') else sample['gt'] + pred = sample['norm_pred'] 
if sample.get('norm_pred') else sample['pred'] + predictions.append(gt) + references.append(pred) + meteor = evaluate.load('meteor', keep_in_memory=True, experiment_id=random.randint(1,1e8)) + meteor_results = meteor.compute(predictions=predictions, references=references) + result[group_name] = meteor_results['meteor'] + + return self.samples,{'METEOR': result} + + +@METRIC_REGISTRY.register("Edit_dist") +class call_Edit_dist(): + def __init__(self, samples): + self.samples = samples + def evaluate(self, group_info=[], save_name='default'): + samples = self.samples + for sample in samples: + img_name = sample['img_id'] if sample['img_id'].endswith('.jpg') else '_'.join(sample['img_id'].split('_')[:-1]) + sample['image_name'] = img_name + gt = sample['norm_gt'] if sample.get('norm_gt') else sample['gt'] + pred = sample['norm_pred'] if sample.get('norm_pred') else sample['pred'] + upper_len = max(len(pred), len(gt)) + sample['upper_len'] = upper_len + if len(pred) > 0 or len(gt) > 0: + edit_dist = Levenshtein.distance(pred, gt) + if not sample.get('metric'): + sample['metric'] = {} + sample['metric']['Edit_dist'] = edit_dist / upper_len + sample['Edit_num'] = edit_dist + + if isinstance(samples, list): + saved_samples = samples + else: + saved_samples = samples.samples + + if not saved_samples: + return {'Edit_dist': {'ALL_page_avg': 'NaN'}} + + df = pd.DataFrame(saved_samples) + up_total_avg = df.groupby("image_name").apply(lambda x: x['Edit_num'].sum() / x['upper_len'].sum()) # page level, sum of edits divided by sum of max(gt,pred) lengths for each sample + per_img_score = up_total_avg.to_dict() + + return samples,{'Edit_dist': {'ALL_page_avg': up_total_avg.mean()}} + + +@METRIC_REGISTRY.register("CDM") +class call_CDM(): + def __init__(self, samples): + self.samples = samples + def evaluate(self, group_info=[], save_name='default'): + if isinstance(self.samples, list): + cdm_samples = copy.deepcopy(self.samples) + else: + cdm_samples = copy.deepcopy(self.samples.samples) + for idx, sample in enumerate(cdm_samples): + sample['img_name'] = sample['img_id'] + sample['img_id'] = str(idx) + sample['gt'] = sample['gt'].lstrip("$$").rstrip("$$").strip() + sample['pred'] = sample['pred'].split("```latex")[-1].split("```")[0] + sample['pred'] = sample['pred'].lstrip("$$").rstrip("$$").strip() + + return self.samples,False + + +class TEDS(object): + ''' Tree Edit Distance basead Similarity + ''' + def __init__(self, structure_only=False, n_jobs=1, ignore_nodes=None): + assert isinstance(n_jobs, int) and (n_jobs >= 1), 'n_jobs must be an integer greather than 1' + self.structure_only = structure_only + self.n_jobs = n_jobs + self.ignore_nodes = ignore_nodes + self.__tokens__ = [] + + def tokenize(self, node): + ''' Tokenizes table cells + ''' + self.__tokens__.append('<%s>' % node.tag) + if node.text is not None: + self.__tokens__ += list(node.text) + for n in node.getchildren(): + self.tokenize(n) + if node.tag != 'unk': + self.__tokens__.append('' % node.tag) + if node.tag != 'td' and node.tail is not None: + self.__tokens__ += list(node.tail) + + def load_html_tree(self, node, parent=None): + ''' Converts HTML tree to the format required by apted + ''' + global __tokens__ + if node.tag == 'td': + if self.structure_only: + cell = [] + else: + self.__tokens__ = [] + self.tokenize(node) + cell = self.__tokens__[1:-1].copy() + new_node = TableTree(node.tag, + int(node.attrib.get('colspan', '1')), + int(node.attrib.get('rowspan', '1')), + cell, *deque()) + else: + new_node = TableTree(node.tag, None, 
None, None, *deque()) + if parent is not None: + parent.children.append(new_node) + if node.tag != 'td': + for n in node.getchildren(): + self.load_html_tree(n, new_node) + if parent is None: + return new_node + + def evaluate(self, pred, true): + ''' Computes TEDS score between the prediction and the ground truth of a + given sample + ''' + if (not pred) or (not true): + return 0.0 + parser = html.HTMLParser(remove_comments=True, encoding='utf-8') + pred = html.fromstring(pred, parser=parser) + true = html.fromstring(true, parser=parser) + if pred.xpath('body/table') and true.xpath('body/table'): + pred = pred.xpath('body/table')[0] + true = true.xpath('body/table')[0] + if self.ignore_nodes: + etree.strip_tags(pred, *self.ignore_nodes) + etree.strip_tags(true, *self.ignore_nodes) + n_nodes_pred = len(pred.xpath(".//*")) + n_nodes_true = len(true.xpath(".//*")) + n_nodes = max(n_nodes_pred, n_nodes_true) + tree_pred = self.load_html_tree(pred) + tree_true = self.load_html_tree(true) + distance = APTED(tree_pred, tree_true, CustomConfig()).compute_edit_distance() + return 1.0 - (float(distance) / n_nodes) + else: + return 0.0 + + def batch_evaluate(self, pred_json, true_json): + ''' Computes TEDS score between the prediction and the ground truth of + a batch of samples + @params pred_json: {'FILENAME': 'HTML CODE', ...} + @params true_json: {'FILENAME': {'html': 'HTML CODE'}, ...} + @output: {'FILENAME': 'TEDS SCORE', ...} + ''' + samples = true_json.keys() + # if self.n_jobs == 1: + scores = [self.evaluate(pred_json.get(filename, ''), true_json[filename]['html']) for filename in tqdm(samples)] + # else: + # inputs = [{'pred': pred_json.get(filename, ''), 'true': true_json[filename]['html']} for filename in samples] + # scores = parallel_process(inputs, self.evaluate, use_kwargs=True, n_jobs=self.n_jobs, front_num=1) + scores = dict(zip(samples, scores)) + return scores + + +class CustomConfig(Config): + @staticmethod + def maximum(*sequences): + """Get maximum possible value + """ + return max(map(len, sequences)) + + def normalized_distance(self, *sequences): + """Get distance from 0 to 1 + """ + return float(Levenshtein.distance(*sequences)) / self.maximum(*sequences) + + def rename(self, node1, node2): + """Compares attributes of trees""" + if (node1.tag != node2.tag) or (node1.colspan != node2.colspan) or (node1.rowspan != node2.rowspan): + return 1. + if node1.tag == 'td': + if node1.content or node2.content: + return self.normalized_distance(node1.content, node2.content) + return 0. 
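+
+# Example (illustrative): TEDS parses both HTML strings, compares their 'body/table' trees with
+# APTED, and returns 1 - edit_distance / max(n_nodes_pred, n_nodes_gt).
+#   teds = TEDS(structure_only=False)
+#   tbl = '<html><body><table><tr><td>1</td></tr></table></body></html>'
+#   teds.evaluate(tbl, tbl)  # identical tables -> 1.0
+#   teds.evaluate('', tbl)   # empty prediction or ground truth -> 0.0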
+ + +class TableTree(Tree): + def __init__(self, tag, colspan=None, rowspan=None, content=None, *children): + self.tag = tag + self.colspan = colspan + self.rowspan = rowspan + self.content = content + self.children = list(children) + + def bracket(self): + """Show tree using brackets notation""" + if self.tag == 'td': + result = '"tag": %s, "colspan": %d, "rowspan": %d, "text": %s' % \ + (self.tag, self.colspan, self.rowspan, self.content) + else: + result = '"tag": %s' % self.tag + for child in self.children: + result += child.bracket() + return "{{{}}}".format(result) + + +class recogition_end2end_base_dataset(): + def __init__(self, samples): + img_id = 0 + for sample in samples: + if not sample.get('img_id'): + sample['img_id'] = img_id + img_id += 1 + self.samples = samples + def __getitem__(self, idx): + return self.samples[idx] + + +class recogition_end2end_table_dataset(recogition_end2end_base_dataset): + def __init__(self, samples, table_format): + self.pred_table_format = table_format + self.samples = self.normalize_data(samples) + + def normalize_data(self, samples): + img_id = 0 + for sample in samples: + p = sample['pred'] + r = sample['gt'] + p = normalized_table(p, self.pred_table_format) + r = normalized_table(r) + sample['norm_gt'] = r + sample['norm_pred'] = p + sample['img_id'] = sample['img_id'] if sample.get('img_id') else img_id + img_id += 1 + + return samples + + + + diff --git a/vlmeval/dataset/Omnidocbench/omnidocbench.py b/vlmeval/dataset/Omnidocbench/omnidocbench.py new file mode 100644 index 0000000000000000000000000000000000000000..72e4d8f417ca27d2539bfa4cfde24eec1425587b --- /dev/null +++ b/vlmeval/dataset/Omnidocbench/omnidocbench.py @@ -0,0 +1,554 @@ +import json +import os +import copy +import pandas as pd +import tempfile +import base64 +from tqdm import tqdm +import torch.distributed as dist +from ..image_base import ImageBaseDataset +from ...smp import * + + +class OmniDocBench(ImageBaseDataset): + + MODALITY = 'IMAGE' + TYPE = 'QA' + + DATASET_URL = {'OmniDocBench':'https://huggingface.co/datasets/ouyanglinke/OmniDocBench_tsv/resolve/main/OmniDocBench.tsv'} + DATASET_MD5 = {'OmniDocBench': '0fa5ccf31e682e219cb9ca83da741a59'} + + + system_prompt = r'''You are an AI assistant specialized in converting PDF images to Markdown format. Please follow these instructions for the conversion: + + 1. Text Processing: + - Accurately recognize all text content in the PDF image without guessing or inferring. + - Convert the recognized text into Markdown format. + - Maintain the original document structure, including headings, paragraphs, lists, etc. + + 2. Mathematical Formula Processing: + - Convert all mathematical formulas to LaTeX format. + - Enclose inline formulas with \( \). For example: This is an inline formula \( E = mc^2 \) + - Enclose block formulas with \\[ \\]. For example: \[ \frac{-b \pm \sqrt{b^2 - 4ac}}{2a} \] + + 3. Table Processing: + - Convert tables to HTML format. + - Wrap the entire table with and
. + + 4. Figure Handling: + - Ignore figures content in the PDF image. Do not attempt to describe or convert images. + + 5. Output Format: + - Ensure the output Markdown document has a clear structure with appropriate line breaks between elements. + - For complex layouts, try to maintain the original document's structure and format as closely as possible. + + Please strictly follow these guidelines to ensure accuracy and consistency in the conversion. Your task is to accurately convert the content of the PDF image into Markdown format without adding any extra explanations or comments. + ''' + + def __init__(self,dataset='OmniDocBench',**kwargs): + super().__init__(dataset,**kwargs) + print(f'self.img_root:{self.img_root}') + + def build_prompt(self, line): + + image_path = self.dump_image(line)[0] + msg = [ + dict(type='image', value=image_path), + dict(type='text', value=self.system_prompt) + ] + return msg + + def evaluate(self, eval_file, **judge_kwargs): + tsv_path=self.data_path + End2end_evaluator=end2end_evaluator(eval_file,tsv_path) + Table_evalutor=table_evalutor(eval_file,tsv_path) + + metrics_all=End2end_evaluator.score() + metircs_table=Table_evalutor.score() + + return metrics_all + + +class end2end_evaluator(): + def __init__(self, + eval_file, + tsv_path, + match_method:str='quick_match', + filter_types:dict=None): + self.result_foler='../../../outputs/OmniDocBench' + if not os.path.exists(self.result_foler): + os.makedirs(self.result_foler) + self.eval_file=eval_file + self.match_method=match_method + self.references=[] + self.predictions = load(eval_file)['prediction'].tolist() + self.dafault_metircs_dict={ + 'text_block': + {'metric': ['Edit_dist', 'BLEU', 'METEOR']}, + 'display_formula': + {'metric': ['Edit_dist', 'CDM']}, + 'table': + {'metric': ['TEDS', 'Edit_dist']}, + 'reading_order': + {'metric': ['Edit_dist']} + } + + references = load(tsv_path)['answer'].tolist() + + load_success,load_fail=0,0 + for i,ans in tqdm(enumerate(references),desc='Loading data'): + try: + ans = json.loads(ans) + load_success+=1 + self.references.append(ans) #[{},{}] + except json.JSONDecodeError as e: + load_fail+=1 + continue + print(f'load_success:{load_success},load_fail:{load_fail}') + + filtered_gt_samples = [] + if filter_types: + for gt_sample in self.references: + select_flag = True + for k, v in filter_types.items(): + if gt_sample["page_info"]["page_attribute"][k] != v: + select_flag = False + if select_flag: + filtered_gt_samples.append(gt_sample) + else: + filtered_gt_samples = self.references #[{},{},{}] + self.references=filtered_gt_samples + + + def score(self)->dict: + samples=self.get_matched_elements(self.references,self.predictions) + metrics=self.process_generated_metric_results(samples) + return metrics + + def get_page_elements(self, selected_annos): + saved_element_dict = defaultdict(list) + related_truncated = [] + truncated_all = {} + for relation in selected_annos["extra"]["relation"]: # Handle truncated text issues + if relation["relation_type"] == 'truncated': + truncated_all[relation["source_anno_id"]] = "" + truncated_all[relation["target_anno_id"]] = "" + exist_flag = False + for merge_list in related_truncated: + if relation["source_anno_id"] in merge_list or relation["target_anno_id"] in merge_list: # Consider cases where three text blocks may need to be merged + merge_list.append(relation["source_anno_id"]) + merge_list.append(relation["target_anno_id"]) + exist_flag = True + if not exist_flag: + related_truncated.append([relation["source_anno_id"], 
relation["target_anno_id"]]) + + for item in selected_annos['layout_dets']: + if item['anno_id'] not in truncated_all.keys(): + saved_element_dict[item["category_type"]].append(item) + else: + truncated_all[item['anno_id']] = item + + for merge_list in related_truncated: + text_block_list = [truncated_all[key] for key in merge_list] + sorted_block = sorted(text_block_list, key=lambda x: x['order']) + text = "" + for block in sorted_block: + text += block['text'] + merged_block = { + "category_type": sorted_block[0]["category_type"], # Directly use information from the first block + "order": sorted_block[0]["order"], + "anno_id": sorted_block[0]["anno_id"], + "text": text, + "merge_list": sorted_block + } + saved_element_dict[sorted_block[0]["category_type"]].append(merged_block) + + return saved_element_dict + + def get_page_elements_list(self, gt_page_elements, category_list): + element_list = [] + for category_type in category_list: + if gt_page_elements.get(category_type): + element_list.extend(gt_page_elements[category_type]) + return element_list + + def get_sorted_text_list(self, selected_annos): + # txt_type: text, latex, html + text_list = [] + for item in selected_annos: + if item.get('order'): + order = item['order'] + else: + order = 0 + # 【txt_type,selecte_annos] + text_list.append((order, item)) + sorted_text_list = sorted(text_list, key=lambda x: x[0]) + return [_[1] for _ in sorted_text_list] + + def filtered_out_ignore(self, items, ignore_category_list): + filted_items = [] + for item in items: + if item['gt_category_type'] not in ignore_category_list: + filted_items.append(item) + return filted_items + + def get_order_paired(self, order_match_s, img_name): + matched = [(item['gt_position'], item['pred_position']) for item in order_match_s if (item['gt_position'] != [""] and item['pred_position'] != "")] + gt_idx_all = [item['gt_position'] for item in order_match_s if (item['gt_position'] != [""])] + read_order_pred = [i[0] for i in sorted(matched, key=lambda x: x[1])] + read_order_gt = sum(gt_idx_all, []) # Convert to one-dimensional list + read_order_gt = [x for x in read_order_gt if x] + gt = sorted(read_order_gt) + pred = sum(read_order_pred, []) + pred = [x for x in pred if x] + if len(pred) > 0 or len(gt) > 0: + import Levenshtein + edit = Levenshtein.distance(gt, pred)/ max(len(pred), len(gt)) + return { + 'gt': gt, + 'pred': pred, + 'img_id': img_name, + 'edit': edit + } + else: + return {} # If both GT and pred are empty for the page, return empty + + def formula_format(self, formula_matches, img_name): + # formated_list = [] + for i, item in enumerate(formula_matches): + item["img_id"] = img_name + '_' + str(i) + return formula_matches + + def get_matched_elements(self,references:list,predictions:list)->dict: + from .metrics import recogition_end2end_base_dataset, recogition_end2end_table_dataset + + plain_text_match = [] + display_formula_match = [] + html_table_match = [] + latex_table_match = [] + order_match = [] + + + for i,sample in enumerate(references): + img_name = os.path.basename(sample["page_info"]["image_path"]) + pred_content = predictions[i] + result = self.process_get_matched_elements(sample, pred_content, img_name) + [plain_text_match_clean, formated_display_formula, latex_table_match_s, html_table_match_s, order_match_single] = result + + if order_match_single: + order_match.append(order_match_single) + if plain_text_match_clean: + plain_text_match.extend(plain_text_match_clean) + if formated_display_formula: + 
display_formula_match.extend(formated_display_formula) + if latex_table_match_s: + latex_table_match.extend(latex_table_match_s) + if html_table_match_s: + html_table_match.extend(html_table_match_s) + + if len(latex_table_match) > len(html_table_match): + table_match = latex_table_match + table_format = 'latex' + else: + table_match = html_table_match + table_format = 'html' + + matched_samples_all = { + "text_block": recogition_end2end_base_dataset(plain_text_match), + "display_formula": recogition_end2end_base_dataset(display_formula_match), + "table": recogition_end2end_table_dataset(table_match, table_format), + "reading_order": recogition_end2end_base_dataset(order_match) + } + + return matched_samples_all + + def process_get_matched_elements(self, sample, pred_content, img_name): + from .utils import match_gt2pred_simple, match_gt2pred_no_split, match_gt2pred_quick, md_tex_filter + from func_timeout import FunctionTimedOut, func_timeout + + if self.match_method == 'simple_match': # add match choice + match_gt2pred = match_gt2pred_simple + elif self.match_method == 'quick_match': + match_gt2pred = match_gt2pred_quick + elif self.match_method == 'no_split': + match_gt2pred = match_gt2pred_no_split + else: + # print('Invalid match method name. The quick_match will be used.') + match_gt2pred = match_gt2pred_quick + + pred_dataset = md_tex_filter(pred_content) + gt_page_elements = self.get_page_elements(sample) + + text_all = self.get_page_elements_list(gt_page_elements, ['text_block', 'title', 'code_txt', 'code_txt_caption', 'reference', 'equation_caption', + 'figure_caption', 'figure_footnote', 'table_caption', 'table_footnote', 'code_algorithm', 'code_algorithm_caption', + 'header', 'footer', 'page_footnote', 'page_number']) + + + display_formula_match_s = [] + plain_text_match_clean = [] + latex_table_match_s = [] + html_table_match_s = [] + order_match_single = [] + if text_all: + gt_text_list = self.get_sorted_text_list(text_all) + try: + plain_text_match_s = func_timeout( + 30, match_gt2pred, args=(gt_text_list, pred_dataset['text_all'], 'text', img_name) + ) + except FunctionTimedOut as e1: + print(f'Time out for plain text match of {img_name}, match_gt2pred_simple will be used.') + plain_text_match_s = match_gt2pred_simple(gt_text_list, pred_dataset['text_all'], 'text', img_name) + except Exception as e: + print(str(e)) + sys.exit() + + if not plain_text_match_s: + print(f'No text match of {img_name}. The plain text match will be empty.') + else: + plain_text_match_clean = self.filtered_out_ignore(plain_text_match_s, ['figure_caption', 'figure_footnote', 'table_caption', 'table_footnote', 'code_algorithm', 'code_algorithm_caption', 'header', 'footer', 'page_footnote', 'page_number', 'equation_caption']) + + + if gt_page_elements.get('equation_isolated'): + gt_display_list = self.get_sorted_text_list(gt_page_elements['equation_isolated']) + display_formula_match_s = match_gt2pred(gt_display_list, pred_dataset['equation_isolated'], 'formula', img_name) + display_formula_match_s = [x for x in display_formula_match_s if x['gt_idx'] != [""]] + if not display_formula_match_s: + print(f'No display_formula_match of {img_name}. 
The display_formula_match will be empty.') + + if gt_page_elements.get('table'): + gt_table_list = self.get_sorted_text_list(gt_page_elements['table']) + if pred_dataset['latex_table']: + latex_table_match_s = match_gt2pred_simple(gt_table_list, pred_dataset['latex_table'], 'latex_table', img_name) + latex_table_match_s = [x for x in latex_table_match_s if x['gt_idx'] != [""]] + if pred_dataset['html_table']: + html_table_match_s = match_gt2pred_simple(gt_table_list, pred_dataset['html_table'], 'html_table', img_name) + html_table_match_s = [x for x in html_table_match_s if x['gt_idx'] != [""]] + else: + html_table_match_s = match_gt2pred_simple(gt_table_list, [], 'html_table', img_name) + html_table_match_s = [x for x in html_table_match_s if x['gt_idx'] != [""]] + + + order_match_s = plain_text_match_clean + if order_match_s: + order_match_single = self.get_order_paired(order_match_s, img_name) + + return [plain_text_match_clean, display_formula_match_s, latex_table_match_s, html_table_match_s, order_match_single] + + def process_generated_metric_results(self,samples,save_name:str='end2end_quick_match'): + from .metrics import show_result, get_full_labels_results, get_page_split, METRIC_REGISTRY + + result_all={} + page_info={} + metircs_dict=self.dafault_metircs_dict + pages=self.references #gt_samples list + + for page in pages: + img_path=os.path.basename(page['page_info']['image_path']) + page_info[img_path]=page['page_info']['page_attribute'] + + for element in metircs_dict.keys(): + + result={} + group_info=metircs_dict[element].get('group',[]) + # samples = samples.get(element) ## + cur_samples = samples[element] + + for metric in metircs_dict[element]['metric']: + metric_val = METRIC_REGISTRY.get(metric) + + cur_samples,result_s = metric_val(cur_samples).evaluate(group_info, f"{save_name}_{element}") + if result_s: + result.update(result_s) + + if result: + print(f"{element}") + show_result(result) + result_all[element]={} + + + group_result=get_full_labels_results(cur_samples) + page_result=get_page_split(cur_samples,page_info) + + result_all[element]={ + 'all':result, + 'group':group_result, + 'page':page_result + } + if not os.path.exists('./output/OmniDocBench'): + os.makedirs('./output/OmniDocBench') + if isinstance(cur_samples,list): + saved_samples=cur_samples + else: + saved_samples=cur_samples.samples + with open(os.path.join(self.result_foler,f'{save_name}_result.josn'),'w',encoding='utf-8') as f: + json.dump(saved_samples,f,indent=4,ensure_ascii=False) + + with open(os.path.join(self.result_foler,f'{save_name}_metric_result.json'),'w',encoding='utf-8') as f: + json.dump(result_all,f,indent=4,ensure_ascii=False) + + dict_list = [] + save_dict={} + en_overall=[] + ch_overall=[] + for category_type, metric in [("text_block", "Edit_dist"), ("display_formula", "Edit_dist"), ("display_formula", "CDM"), ("table", "TEDS"), ("table", "Edit_dist"), ("reading_order", "Edit_dist")]: + if metric == 'CDM': + save_dict[category_type+'_'+metric+'_EN'] = '-' + save_dict[category_type+'_'+metric+'_CH'] = '-' + elif metric == "TEDS": + save_dict[category_type+'_'+metric+'_EN'] = result_all[category_type]["page"][metric]["language: english"] * 100 + save_dict[category_type+'_'+metric+'_CH'] = result_all[category_type]["page"][metric]["language: simplified_chinese"] * 100 + else: + save_dict[category_type+'_'+metric+'_EN'] = result_all[category_type]["page"][metric].get("language: english", np.nan) + save_dict[category_type+'_'+metric+'_CH'] = 
result_all[category_type]["page"][metric].get("language: simplified_chinese",np.nan) + if metric == "Edit_dist": + en_overall.append(result_all[category_type]["page"][metric].get("language: english", np.nan)) + ch_overall.append(result_all[category_type]["page"][metric].get("language: simplified_chinese",np.nan)) + + save_dict['overall_EN'] = sum(en_overall) / len(en_overall) + save_dict['overall_CH'] = sum(ch_overall) / len(ch_overall) + dict_list.append(save_dict) + df = pd.DataFrame(dict_list,index=['end2end',]).round(3) + + with open(os.path.join(self.result_foler,'End2End_Evaluation.json'),'w',encoding='utf-8') as f: + json.dump(result_all,f,indent=4,ensure_ascii=False) + df.to_csv(os.path.join(self.result_foler,'overall.csv')) + over_all_path=os.path.join(self.result_foler,'End2End_Evaluation.json') + print(f"The save path of overall.csv is :{over_all_path}") + return df + + +class table_evalutor(): + def __init__(self,eval_file,tsv_path): + + self.result_foler='../../../outputs/OmniDocBench' + if not os.path.exists(self.result_foler): + os.makedirs(self.result_foler) + gt_key='html' + pred_key='pred' + self.category_filter='table' + self.category_type='table' + self.metircs_list=['TEDS','Edit_dist'] + self.gt_samples,self.table_samples=self.load_data(eval_file,tsv_path,pred_key,gt_key) + + def load_data(self,eval_file,gt_file,pred_key,gt_key): + from .data_preprocess import clean_string, normalized_formula, textblock2unicode, normalized_table + samples=[] + preds=[] + predictions=pd.read_excel(eval_file)['prediction'].tolist() + gt_samples=pd.read_csv(gt_file,sep='\t')['answer'].tolist() + load_success,load_fail=0,0 + for i,gt_sample in tqdm(enumerate(gt_samples),desc='Loading data'): + try: + ans=json.loads(gt_sample) + for item in ans['layout_dets']: + if item['category_type']=="table": + item['pred']=predictions[i] + load_success+=1 + preds.append(ans) + + except json.JSONDecodeError as e: + load_fail+=1 + continue + print(f'load_table_success:{load_success},load_table_fail:{load_fail}') + + count=0 + for pred in preds: + img_name = os.path.basename(pred['page_info']['image_path']) + for i, ann in enumerate(pred['layout_dets']): + if not ann.get(gt_key): + continue + if self.category_filter: + if ann['category_type'] not in self.category_filter: + continue + if not ann.get(pred_key): + # print(f'Cannot find pred for {img_name}. 
ann is {ann}') + # pdb.set_trace() + count += 1 + continue + else: + gt_text = ann[gt_key] + norm_gt = gt_text + pred_text = ann[pred_key] + norm_pred = pred_text + if self.category_type: + if self.category_type == 'text': + norm_gt = clean_string(textblock2unicode(ann[gt_key])) + norm_pred = clean_string(textblock2unicode(ann[pred_key])) + elif self.category_type == 'formula': + norm_gt = normalized_formula(ann[gt_key]) + norm_pred = normalized_formula(ann[pred_key]) + elif self.category_type == 'table': + norm_gt = normalized_table(ann[gt_key], gt_key) + norm_pred = normalized_table(ann[pred_key], gt_key) + else: + raise ValueError(f'Invalid category type: {self.category_type}') + + samples.append({ + "gt": gt_text, + "norm_gt": norm_gt, + "gt_attribute": [ann['attribute']], + 'pred': pred_text, + "norm_pred": norm_pred, + 'img_id': img_name + }) + + print(f'Cannot find pred for {count} samples.') + return preds,samples + + def score(self)->dict: + metrics=self.process_generated_metric_results() + return metrics + + def process_generated_metric_results(self,save_name:str='OmniDocBench_table'): + from .metrics import show_result, get_full_labels_results, get_page_split, METRIC_REGISTRY + + p_scores={} + page_info={} + no_page_flag=False + samples=self.table_samples + pages=self.gt_samples + + for page in pages: + if 'page_info' not in page: + no_page_flag=True + break + img_path=os.path.basename(page['page_info']['image_path']) + page_info[img_path]=page['page_info']['page_attribute'] + + for metric in self.metircs_list: + metric_val=METRIC_REGISTRY.get(metric) + samples, result = metric_val(samples).evaluate({}, save_name) + if result: + p_scores.update(result) + show_result(p_scores) + group_result=get_full_labels_results(samples) + if no_page_flag: + page_result={} + else: + page_result=get_page_split(samples,page_info) + + result_all={ + 'all':p_scores, + 'group':group_result, + 'page':page_result + } + + with open(os.path.join(self.result_foler,f'{save_name}_metric_result.json'),'w',encoding='utf-8') as f: + json.dump(result_all,f,indent=4,ensure_ascii=False) + + dict_list=[] + dict_list.append(result_all["group"]["TEDS"]) + + df4 = pd.DataFrame(dict_list, index=['OmniDocBench_table']) + df4 = df4 * 100 + df4 = df4.round(1) + selected_columns = df4[["language: table_en", "language: table_simplified_chinese", "language: table_en_ch_mixed", "line: full_line", "line: less_line", "line: fewer_line", "line: wireless_line", + "with_span: True", "with_span: False", "include_equation: True", "include_equation: False", "include_background: True", "include_background: False", "table_layout: vertical", "table_layout: horizontal"]] + + selected_columns.to_csv(os.path.join(self.result_foler,'table_attribute.csv')) + table_attribute_path=os.path.join(self.result_foler,'table_attribute.csv') + print(f'The save path of table_attribute.csv is :{table_attribute_path}') + selected_columns + + + return selected_columns diff --git a/vlmeval/dataset/Omnidocbench/requirements.txt b/vlmeval/dataset/Omnidocbench/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d8f9669d70ba3613616cb7ea36886ccfb77d79ed --- /dev/null +++ b/vlmeval/dataset/Omnidocbench/requirements.txt @@ -0,0 +1,13 @@ +torchvision +Levenshtein +BeautifulSoup4 +pylatexenc +scipy +evaluate +apted +lxml +func_timeout +accelerate>=0.26.0 +jmespath +qwen_vl_utils +nltk \ No newline at end of file diff --git a/vlmeval/dataset/Omnidocbench/utils.py b/vlmeval/dataset/Omnidocbench/utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..de8067ac101ced753c9accc0e1cb87413697c031 --- /dev/null +++ b/vlmeval/dataset/Omnidocbench/utils.py @@ -0,0 +1,1916 @@ +import re +import unicodedata +import subprocess +import shutil +import uuid +import html +import os +import sys +import pdb +import json +import copy +import unicodedata + +import Levenshtein +import numpy as np +from bs4 import BeautifulSoup +from pylatexenc.latex2text import LatexNodes2Text +from scipy.optimize import linear_sum_assignment +from pylatexenc.latexencode import unicode_to_latex +from pylatexenc.latex2text import LatexNodes2Text +from pylatexenc.latexwalker import LatexWalker, LatexEnvironmentNode, LatexCharsNode, LatexGroupNode, LatexMacroNode, LatexSpecialsNode +from collections import defaultdict + + +def read_md_file(filepath): + with open(filepath, 'r', encoding='utf-8') as file: + content = file.read() + + return content + +def save_paired_result(preds, gts, save_path): + save_result = [] + formula_id = 0 + for gt, pred in zip(gts, preds): + save_result.append({ + "gt": gt, + "pred": pred, + "img_id": formula_id + }) + formula_id += 1 + with open(save_path, 'w', encoding='utf-8') as f: + json.dump(save_result, f, indent=4, ensure_ascii=False) + +def remove_markdown_fences(content): + content = re.sub(r'^```markdown\n?', '', content, flags=re.MULTILINE) + content = re.sub(r'```\n?$', '', content, flags=re.MULTILINE) + return content + +# Standardize all consecutive characters +def replace_repeated_chars(input_str): + input_str = re.sub(r'_{4,}', '____', input_str) # Replace more than 4 consecutive underscores with 4 underscores + input_str = re.sub(r' {4,}', ' ', input_str) # Replace more than 4 consecutive spaces with 4 spaces + return re.sub(r'([^a-zA-Z0-9])\1{10,}', r'\1\1\1\1', input_str) # For other consecutive symbols (except numbers and letters), replace more than 10 occurrences with 4 + +# Special Unicode handling +def fullwidth_to_halfwidth(s): + result = [] + for char in s: + code = ord(char) + # Convert full-width space to half-width space + if code == 0x3000: + code = 0x0020 + # Convert other full-width characters to half-width + elif 0xFF01 <= code <= 0xFF5E: + code -= 0xFEE0 + result.append(chr(code)) + return ''.join(result) + +def find_special_unicode(s): + special_chars = {} + for char in s: + if ord(char) > 127: # Non-ASCII characters + # unicode_name = unicodedata.name(char, None) + unicode_name = unicodedata.category(char) + special_chars[char] = f'U+{ord(char):04X} ({unicode_name})' + return special_chars + + +inline_reg = re.compile( + r'\$(.*?)\$|' + r'\\\((.*?)\\\)', +) + +def textblock2unicode(text): + inline_matches = inline_reg.finditer(text) + removal_positions = [] + for match in inline_matches: + position = [match.start(), match.end()] + content = match.group(1) if match.group(1) is not None else match.group(2) + # print('-------- content-------', content) + # Remove escape characters \ + clean_content = re.sub(r'\\([\\_&%^])', '', content) + + try: + if any(char in clean_content for char in r'\^_'): + if clean_content.endswith('\\'): + clean_content += ' ' + # inline_array.append(match.group(0)) + unicode_content = LatexNodes2Text().latex_to_text(clean_content) + removal_positions.append((position[0], position[1], unicode_content)) + except: + continue + + # Remove inline formulas from original text + for start, end, unicode_content in sorted(removal_positions, reverse=True): + text = text[:start] + unicode_content.strip() + text[end:] + + return text + +def 
normalized_formula(text): + # Normalize math formulas before matching + filter_list = ['\\mathbf', '\\mathrm', '\\mathnormal', '\\mathit', '\\mathbb', '\\mathcal', '\\mathscr', '\\mathfrak', '\\mathsf', '\\mathtt', + '\\textbf', '\\text', '\\boldmath', '\\boldsymbol', '\\operatorname', '\\bm', + '\\symbfit', '\\mathbfcal', '\\symbf', '\\scriptscriptstyle', '\\notag', + '\\setlength', '\\coloneqq', '\\space', '\\thickspace', '\\thinspace', '\\medspace', '\\nobreakspace', '\\negmedspace', + '\\quad', '\\qquad', '\\enspace', '\\substackw', ' '] + # '\\left', '\\right', '{', '}', ' '] + + # delimiter_filter + pattern = re.compile(r"\\\[(.+?)(?]*>(.*)' + tables = re.findall(pattern, table_res, re.DOTALL | re.IGNORECASE) + table_res = ''.join(tables) + # table_res = re.sub('','',table_res) + table_res = re.sub('( style=".*?")', "", table_res) + table_res = re.sub('( height=".*?")', "", table_res) + table_res = re.sub('( width=".*?")', "", table_res) + table_res = re.sub('( align=".*?")', "", table_res) + table_res = re.sub('( class=".*?")', "", table_res) + table_res = re.sub('',"",table_res) + + table_res = re.sub(r'\s+', " ", table_res) + table_res_no_space = '' + table_res.replace(' ','') + '
' + # table_res_no_space = re.sub(' (style=".*?")',"",table_res_no_space) + # table_res_no_space = re.sub(r'[ ]', " ", table_res_no_space) + table_res_no_space = re.sub('colspan="', ' colspan="', table_res_no_space) + table_res_no_space = re.sub('rowspan="', ' rowspan="', table_res_no_space) + table_res_no_space = re.sub('border="', ' border="', table_res_no_space) + + table_res = '' + table_res + '
' + # table_flow.append(table_res) + # table_flow_no_space.append(table_res_no_space) + + return table_res, table_res_no_space + + def clean_table(input_str,flag=True): + if flag: + input_str = input_str.replace('', '').replace('', '') + input_str = input_str.replace('', '').replace('', '') + input_str = input_str.replace('', '').replace('', '') + input_str = input_str.replace('
', '').replace('
', '') + input_str = input_str.replace('

', '').replace('

', '') + input_str = input_str.replace('', '') + input_str = re.sub('.*?','',input_str) + return input_str + + norm_text, _ = process_table_html(text) + norm_text = clean_table(norm_text) + return norm_text + +def normalized_latex_table(text): + def latex_template(latex_code): + template = r''' + \documentclass[border=20pt]{article} + \usepackage{subcaption} + \usepackage{url} + \usepackage{graphicx} + \usepackage{caption} + \usepackage{multirow} + \usepackage{booktabs} + \usepackage{color} + \usepackage{colortbl} + \usepackage{xcolor,soul,framed} + \usepackage{fontspec} + \usepackage{amsmath,amssymb,mathtools,bm,mathrsfs,textcomp} + \setlength{\parindent}{0pt}''' + \ + r''' + \begin{document} + ''' + \ + latex_code + \ + r''' + \end{document}''' + + return template + + def process_table_latex(latex_code): + SPECIAL_STRINGS= [ + ['\\\\vspace\\{.*?\\}', ''], + ['\\\\hspace\\{.*?\\}', ''], + ['\\\\rule\{.*?\\}\\{.*?\\}', ''], + ['\\\\addlinespace\\[.*?\\]', ''], + ['\\\\addlinespace', ''], + ['\\\\renewcommand\\{\\\\arraystretch\\}\\{.*?\\}', ''], + ['\\\\arraystretch\\{.*?\\}', ''], + ['\\\\(row|column)?colors?\\{[^}]*\\}(\\{[^}]*\\}){0,2}', ''], + ['\\\\color\\{.*?\\}', ''], + ['\\\\textcolor\\{.*?\\}', ''], + ['\\\\rowcolor(\\[.*?\\])?\\{.*?\\}', ''], + ['\\\\columncolor(\\[.*?\\])?\\{.*?\\}', ''], + ['\\\\cellcolor(\\[.*?\\])?\\{.*?\\}', ''], + ['\\\\colorbox\\{.*?\\}', ''], + ['\\\\(tiny|scriptsize|footnotesize|small|normalsize|large|Large|LARGE|huge|Huge)', ''], + [r'\s+', ' '], + ['\\\\centering', ''], + ['\\\\begin\\{table\\}\\[.*?\\]', '\\\\begin{table}'], + ['\t', ''], + ['@{}', ''], + ['\\\\toprule(\\[.*?\\])?', '\\\\hline'], + ['\\\\bottomrule(\\[.*?\\])?', '\\\\hline'], + ['\\\\midrule(\\[.*?\\])?', '\\\\hline'], + ['p\\{[^}]*\\}', 'l'], + ['m\\{[^}]*\\}', 'c'], + ['\\\\scalebox\\{[^}]*\\}\\{([^}]*)\\}', '\\1'], + ['\\\\textbf\\{([^}]*)\\}', '\\1'], + ['\\\\textit\\{([^}]*)\\}', '\\1'], + ['\\\\cmidrule(\\[.*?\\])?\\(.*?\\)\\{([0-9]-[0-9])\\}', '\\\\cline{\\2}'], + ['\\\\hline', ''], + [r'\\multicolumn\{1\}\{[^}]*\}\{((?:[^{}]|(?:\{[^{}]*\}))*)\}', r'\1'] + ] + pattern = r'\\begin\{tabular\}.*\\end\{tabular\}' # 注意这里不用 .*? + matches = re.findall(pattern, latex_code, re.DOTALL) + latex_code = ' '.join(matches) + + for special_str in SPECIAL_STRINGS: + latex_code = re.sub(fr'{special_str[0]}', fr'{special_str[1]}', latex_code) + + return latex_code + + def convert_latex_to_html(latex_content, cache_dir='./temp'): + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + + uuid_str = str(uuid.uuid1()) + with open(f'{cache_dir}/{uuid_str}.tex', 'w') as f: + f.write(latex_template(latex_content)) + + cmd = ['latexmlc', '--quiet', '--nocomments', f'--log={cache_dir}/{uuid_str}.log', + f'{cache_dir}/{uuid_str}.tex', f'--dest={cache_dir}/{uuid_str}.html'] + try: + subprocess.run(cmd, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + with open(f'{cache_dir}/{uuid_str}.html', 'r') as f: + html_content = f.read() + + pattern = r']*>(.*)' + tables = re.findall(pattern, html_content, re.DOTALL | re.IGNORECASE) + tables = [f'{table}
' for table in tables] + html_content = '\n'.join(tables) + + except Exception as e: + html_content = '' + + shutil.rmtree(cache_dir) + return html_content + + html_text = convert_latex_to_html(text) + normlized_tables = normalized_html_table(html_text) + return normlized_tables + + +def normalized_table(text, format='html'): + if format not in ['html', 'latex']: + raise ValueError('Invalid format: {}'.format(format)) + else: + return globals()['normalized_{}_table'.format(format)](text) + + +def textblock_with_norm_formula(text): + inline_matches = inline_reg.finditer(text) + removal_positions = [] + for match in inline_matches: + position = [match.start(), match.end()] + content = match.group(1) if match.group(1) is not None else match.group(2) + # print('-------- content-------', content) + + norm_content = normalized_formula(content) + removal_positions.append((position[0], position[1], norm_content)) + + # Remove inline formulas from original text + for start, end, norm_content in sorted(removal_positions, reverse=True): + text = text[:start] + norm_content.strip() + text[end:] + + return text + + +def inline_filter_unicode(text): + # Ensure text is string type + if not isinstance(text, str): + text = str(text) + + # Replace inline formula boundary markers + #print('--------text-------',text) + placeholder = '__INLINE_FORMULA_BOUNDARY__' + text_copy = text.replace('$', placeholder).replace('\\(', placeholder).replace('\\)', placeholder) + #print('--------text_copy-------',text_copy) + # Convert LaTeX content to Unicode representation + text_copy = LatexNodes2Text().latex_to_text(text_copy) + #print('--------text_copy---unicode----',text_copy) + # Restore boundary markers + text_copy = text_copy.replace(placeholder, '$') + + inline_array = [] + inline_matches = inline_reg.finditer(text_copy) + # Record positions of inline formulas to be removed + removal_positions = [] + + for match in inline_matches: + position = [match.start(), match.end()] + content = match.group(1) if match.group(1) is not None else match.group(2) + print('-------- content-------', content) + # Remove escape characters \ + clean_content = re.sub(r'\\([\\_&%^])', '', content) + + if any(char in clean_content for char in r'\^_'): + # inline_array.append(match.group(0)) + inline_array.append({ + 'category_type': 'equation_inline', + 'position': position, + 'content': content, + }) + removal_positions.append((position[0], position[1])) + + # Remove inline formulas from original text + for start, end in sorted(removal_positions, reverse=True): + text = text[:start] + text[end:] + + return text, inline_array + +def inline_filter(text): + # Ensure text is string type + if not isinstance(text, str): + text = str(text) + + inline_array = [] + inline_matches = inline_reg.finditer(text) + + for match in inline_matches: + position = [match.start(), match.end()] + content = match.group(1) if match.group(1) is not None else match.group(2) + # print('inline_content: ', content) + + # Remove escape characters \ + clean_content = re.sub(r'\\([\\_&%^])', '', content) + + if any(char in clean_content for char in r'\^_'): + # inline_array.append(match.group(0)) + inline_array.append({ + 'category_type': 'equation_inline', + 'position': position, + 'content': match.group(0), + }) + text = text.replace(match.group(0), '') + # print('-----Found inline formula: ', match.group(0)) + else: + text = text.replace(match.group(0), content) + + return text, inline_array + +# Text OCR quality check processing: +def clean_string(input_string): + # 
Use regex to keep Chinese characters, English letters and numbers + input_string = input_string.replace('\\t', '').replace('\\n', '').replace('\t', '').replace('\n', '').replace('/t', '').replace('/n', '') + cleaned_string = re.sub(r'[^\w\u4e00-\u9fff]', '', input_string) + return cleaned_string + +def extract_tabular(text): + begin_pattern = r'\\begin{tabular}' + end_pattern = r'\\end{tabular}' + + tabulars = [] + positions = [] + current_pos = 0 + stack = [] + + while current_pos < len(text): + begin_match = re.search(begin_pattern, text[current_pos:]) + end_match = re.search(end_pattern, text[current_pos:]) + + if not begin_match and not end_match: + break + + if begin_match and (not end_match or begin_match.start() < end_match.start()): + stack.append(current_pos + begin_match.start()) + current_pos += begin_match.start() + len(end_pattern) + elif end_match: + if stack: + start_pos = stack.pop() + if not stack: + end_pos = current_pos + end_match.start() + len(end_pattern) + tabular_code = text[start_pos:end_pos] + tabulars.append(tabular_code) + positions.append((start_pos, end_pos)) + current_pos += end_match.start() + len(end_pattern) + else: + current_pos += 1 + + if stack: + new_start = stack[0] + len(begin_pattern) + new_tabulars, new_positions = extract_tabular(text[new_start:]) + new_positions = [(start + new_start, end + new_start) for start, end in new_positions] + tabulars.extend(new_tabulars) + positions.extend(new_positions) + + return tabulars, positions + +# math reg + # r'\\begin{equation\*?}(.*?)\\end{equation\*?}|' + # r'\\begin{align\*?}(.*?)\\end{align\*?}|' + # r'\\begin{gather\*?}(.*?)\\end{gather\*?}|' +display_reg = re.compile( + r'\$\$(.*?)\$\$|' + r'\\\[(.*?)\\\]|' + r'\$(.*?)\$|' + r'\\\((.*?)\\\)', + re.DOTALL +) + +# inline_reg = re.compile( +# r'(?)', + re.DOTALL +) + +# title +title_reg = re.compile( + r'^\s*#.*$', + re.MULTILINE) + +# img +img_pattern = r'!\[.*?\]\(.*?\)' + +# code block +code_block_reg = re.compile( + r'```(\w+)\n(.*?)```', + re.DOTALL +) + + +def md_tex_filter(content): + ''' + Input: 1 page md or tex content - String + Output: text, display, inline, table, title, code - list + ''' + content = re.sub(img_pattern, '', content) # remove image + content = remove_markdown_fences(content) # remove markdown fences + content = replace_repeated_chars(content) # replace all consecutive characters + + + + pred_all = [] + latex_table_array, table_positions = extract_tex_table(content) + for latex_table, position in zip(latex_table_array, table_positions): + position = [position[0], position[0]+len(latex_table)] # !!! 
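+            # the end offset is recomputed as start + len(latex_table), and the matched
+            # span is then overwritten with an equal-length run of spaces, so character
+            # offsets of later matches (html tables, formulas, code blocks, plain text)
+            # still point into the original page string; these offsets become the
+            # 'position' values used downstream for reading-order evaluation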
+ pred_all.append({ + 'category_type': 'latex_table', + 'position': position, + 'content': latex_table + }) + content = content[:position[0]] + ' '*(position[1]-position[0]) + content[position[1]:] # replace latex table with space + + + # extract html table + html_table_array, table_positions = extract_html_table(content) + for html_table, position in zip(html_table_array, table_positions): + position = [position[0], position[0]+len(html_table)] + pred_all.append({ + 'category_type': 'html_table', + 'position': position, + 'content': html_table + }) + content = content[:position[0]] + ' '*(position[1]-position[0]) + content[position[1]:] # replace html table with space + + # extract interline formula + display_matches = display_reg.finditer(content) + for match in display_matches: + matched = match.group(0) + if matched: + single_line = ''.join(matched.split()) + position = [match.start(), match.end()] + # replace $$ with \[\] + dollar_pattern = re.compile(r'\$\$(.*?)\$\$|\$(.*?)\$|\\\((.*?)\\\)', re.DOTALL) + sub_match = dollar_pattern.search(single_line) + if sub_match is None: + # pass + content = content[:position[0]] + ' '*(position[1]-position[0]) + content[position[1]:] + pred_all.append({ + 'category_type': 'equation_isolated', + 'position': position, + 'content': single_line + }) + elif sub_match.group(1): + single_line = re.sub(dollar_pattern, r'\\[\1\\]', single_line) + content = content[:position[0]] + ' '*(position[1]-position[0]) + content[position[1]:] # replace equation with space + pred_all.append({ + 'category_type': 'equation_isolated', + 'position': position, + 'content': single_line + }) + else: + single_line = re.sub(dollar_pattern, r'\\[\2\3\\]', single_line) + pred_all.append({ + 'category_type': 'equation_isolated', + 'position': position, + 'content': single_line, + 'fine_category_type': 'equation_inline' + }) + + + # extract md table with || + md_table_mathces = md_table_reg.findall(content+'\n') + if len(md_table_mathces) >= 2: + # print("md table found!") + # print("content:", content) + content = convert_markdown_to_html(content) + # print('----------content after converting md table to html:', content) + html_table_matches = html_table_reg.finditer(content) + if html_table_matches: + for match in html_table_matches: + matched = match.group(0) + position = [match.start(), match.end()] + # content = content.replace(match, '') + # print('content after removing the md table:', content) + content = content[:position[0]] + ' '*(position[1]-position[0]) + content[position[1]:] # replace md table with space + pred_all.append({ + 'category_type': 'html_table', + 'position': position, + 'content': matched.strip(), + 'fine_category_type': 'md2html_table' + }) + # print('---------After md table: \n', content) + + # extract code blocks + code_matches = code_block_reg.finditer(content) + if code_matches: + for match in code_matches: + position = [match.start(), match.end()] + language = match.group(1) + code = match.group(2).strip() + # content = content.replace(match.group(0), '') + content = content[:position[0]] + ' '*(position[1]-position[0]) + content[position[1]:] # replace code block with space + pred_all.append({ + 'category_type': 'text_all', + 'position': position, + 'content': code, + 'language': language, + 'fine_category_type': 'code' + }) + + + # Remove latex style + content = re.sub(r'\\title\{(.*?)\}', r'\1', content) + content = re.sub(r'\\title\s*\{\s*(.*?)\s*\}', r'\1', content, flags=re.DOTALL) + content = re.sub(r'\\text\s*\{\s*(.*?)\s*\}', r'\1', 
content, flags=re.DOTALL) + content = re.sub(r'\\section\*?\{(.*?)\}', r'\1', content) + content = re.sub(r'\\section\*?\{\s*(.*?)\s*\}', r'\1', content, flags=re.DOTALL) + + # extract texts + res = content.split('\n\n') + if len(res) == 1: + res = content.split('\n') # some models do not use double newlines, so use single newlines to split + + content_position = 0 + for text in res: + position = [content_position, content_position+len(text)] + content_position += len(text) + text = text.strip() + text = text.strip('\n') + # print('ori_text: ', text) + text = '\n'.join([_.strip() for _ in text.split('\n') if _.strip()]) # avoid some single newline content with many spaces + # print('after strip text: ', text) + + if text: # Check if the stripped text is not empty + if text.startswith(''): + pred_all.append({ + 'category_type': 'html_table', + 'position': position, + 'content': text, + }) + + elif text.startswith('$') and text.endswith('$'): + if text.replace('$', '').strip(): + pred_all.append({ + 'category_type': 'equation_isolated', + 'position': position, + 'content': text.strip(), + }) + else: + text = text.strip() + if text: + pred_all.append({ + 'category_type': 'text_all', + 'position': position, + 'content': text, + 'fine_category_type': 'text_block' + }) + + pred_dataset = defaultdict(list) + pred_all = sorted(pred_all, key=lambda x: x['position'][0]) + for item in pred_all: + pred_dataset[item['category_type']].append(item) + # pdb.set_trace() + return pred_dataset + + +def extract_tex_table(content): + tables = [] + tables_positions = [] + + pattern = r'\\begin{table}(.*?)\\end{table}' + for match in re.finditer(pattern, content, re.DOTALL): + start_pos = match.start() + end_pos = match.end() + table_content = match.group(0) + tables.append(table_content) + tables_positions.append((start_pos, end_pos)) + content = content[:start_pos] + ' '*(end_pos-start_pos) + content[end_pos:] + + tabulars, tabular_positions = extract_tabular(content) + all_tables = tables + tabulars + all_positions = tables_positions + tabular_positions + + all_result = sorted([[pos, table]for pos, table in zip(all_positions, all_tables)], key=lambda x: x[0][0]) + all_tables = [x[1] for x in all_result] + all_positions = [x[0] for x in all_result] + + return all_tables, all_positions + + +def extract_html_table(text): + begin_pattern = r']*)>' + end_pattern = r'' + + tabulars = [] + positions = [] + current_pos = 0 + stack = [] + + while current_pos < len(text): + begin_match = re.search(begin_pattern, text[current_pos:]) + end_match = re.search(end_pattern, text[current_pos:]) + + if not begin_match and not end_match: + break + + if begin_match and (not end_match or begin_match.start() < end_match.start()): + stack.append(current_pos + begin_match.start()) + current_pos += begin_match.start() + len(end_pattern) + elif end_match: + if stack: + start_pos = stack.pop() + if not stack: + end_pos = current_pos + end_match.start() + len(end_pattern) + tabular_code = text[start_pos:end_pos] + tabulars.append(tabular_code) + positions.append((start_pos, end_pos)) + current_pos += end_match.start() + len(end_pattern) + else: + current_pos += 1 + + if stack: + new_start = stack[0] + len(begin_pattern) + new_tabulars, new_positions = extract_html_table(text[new_start:]) + new_positions = [(start + new_start, end + new_start) for start, end in new_positions] + tabulars.extend(new_tabulars) + positions.extend(new_positions) + + return tabulars, positions + + +def extract_node_content(node): + """ Recursively extract 
content from LatexEnvironmentNode and rebuild LaTeX table representation """ + if isinstance(node, LatexCharsNode): + return node.chars # Use chars attribute + elif isinstance(node, LatexGroupNode): + return "{" + "".join(extract_node_content(n) for n in node.nodelist) + "}" + elif isinstance(node, LatexMacroNode): + # Extract macro command and its arguments + macro_content = "\\" + node.macroname + if node.nodeargs: + macro_content += "".join([extract_node_content(arg) for arg in node.nodeargs]) + return macro_content + elif isinstance(node, LatexEnvironmentNode): + # Extract environment, preserve environment name and arguments + content = "\\begin{" + node.environmentname + "}" + if node.nodeargd and node.nodeargd.argnlist: + # content += "".join("{" + extract_node_content(arg) + "}" for arg in node.nodeargd) + # content += "".join("{" + extract_node_content(node.nodeargd) + "}") + content += "{" + extract_node_content(node.nodeargd.argnlist[0]) + "}" + if node.nodelist: + content += "".join(extract_node_content(n) for n in node.nodelist) + content += "\\end{" + node.environmentname + "}" + return content + elif isinstance(node, LatexSpecialsNode): # Changed to LatexSpecialsNode + return node.specials_chars + else: + return "" + +def get_node_end_pos(node): + """Recursively determine the end position of a node""" + if hasattr(node, 'nodelist') and node.nodelist: + # If the node has child nodes, recursively find the end position of the last child node + return get_node_end_pos(node.nodelist[-1]) + elif hasattr(node, 'pos_end'): + # If the node has pos_end attribute, return it directly + return node.pos_end + else: + # If there are no child nodes, assume the node ends at the last character of its content + return node.pos + len(str(node)) + +def remove_tex_table(content): + tables, positions = extract_tex_table(content) + + # Delete in reverse order by position to avoid affecting unprocessed start positions + for start, end in sorted(positions, reverse=True): + content = content[:start] + content[end:] # Remove table content + + return content + + + +def get_pred_category_type(pred_idx, pred_items): + # if pred_idx: + if pred_items[pred_idx].get('fine_category_type'): + pred_pred_category_type = pred_items[pred_idx]['fine_category_type'] + else: + pred_pred_category_type = pred_items[pred_idx]['category_type'] + # else: + # pred_pred_category_type = "" + return pred_pred_category_type + + +def compute_edit_distance_matrix_new(gt_lines, matched_lines): + try: + distance_matrix = np.zeros((len(gt_lines), len(matched_lines))) + for i, gt_line in enumerate(gt_lines): + for j, matched_line in enumerate(matched_lines): + if len(gt_line) == 0 and len(matched_line) == 0: + distance_matrix[i][j] = 0 + else: + distance_matrix[i][j] = Levenshtein.distance(gt_line, matched_line) / max(len(matched_line), len(gt_line)) + return distance_matrix + except ZeroDivisionError: + #print("ZeroDivisionError occurred. 
Outputting norm_gt_lines and norm_pred_lines:") + # print("norm_gt_lines:", gt_lines) + # print("norm_pred_lines:", matched_lines) + raise + +def get_gt_pred_lines(gt_items, pred_items, line_type): + norm_html_lines = [] + gt_lines = [] + gt_cat_list = [] + for item in gt_items: + if item.get('fine_category_type'): + gt_cat_list.append(item['fine_category_type']) + else: + gt_cat_list.append(item['category_type']) + if item.get('content'): + gt_lines.append(str(item['content'])) + norm_html_lines.append(str(item['content'])) + elif line_type == 'text': + gt_lines.append(str(item['text'])) + elif line_type == 'html_table': + gt_lines.append(str(item['html'])) + elif line_type == 'formula': + gt_lines.append(str(item['latex'])) + elif line_type == 'latex_table': + gt_lines.append(str(item['latex'])) + norm_html_lines.append(str(item['html'])) + + pred_lines = [str(item['content']) for item in pred_items] + + + if line_type == 'formula': + norm_gt_lines = [normalized_formula(_) for _ in gt_lines] + norm_pred_lines = [normalized_formula(_) for _ in pred_lines] + elif line_type == 'text': + # norm_gt_lines = [textblock_with_norm_formula(_) for _ in gt_lines] + # norm_pred_lines = [textblock_with_norm_formula(_) for _ in pred_lines] + norm_gt_lines = [clean_string(textblock2unicode(_)) for _ in gt_lines] + norm_pred_lines = [clean_string(textblock2unicode(_)) for _ in pred_lines] + # norm_gt_lines = get_norm_text_lines(gt_lines) + # norm_pred_lines = get_norm_text_lines(pred_lines) + else: + norm_gt_lines = gt_lines + norm_pred_lines = pred_lines + + if line_type == 'latex_table': + gt_lines = norm_html_lines + + + filtered_lists = [(a, b, c) for a, b, c in zip(gt_lines, norm_gt_lines, gt_cat_list) if a and b] + + # decompress to three lists + if filtered_lists: + gt_lines_c, norm_gt_lines_c, gt_cat_list_c = zip(*filtered_lists) + + # convert to lists + gt_lines_c = list(gt_lines_c) + norm_gt_lines_c = list(norm_gt_lines_c) + gt_cat_list_c = list(gt_cat_list_c) + else: + gt_lines_c = [] + norm_gt_lines_c = [] + gt_cat_list_c = [] + + # pred's empty values + filtered_lists = [(a, b) for a, b in zip(pred_lines, norm_pred_lines) if a and b] + + # decompress to two lists + if filtered_lists: + pred_lines_c, norm_pred_lines_c = zip(*filtered_lists) + + # convert to lists + pred_lines_c = list(pred_lines_c) + norm_pred_lines_c = list(norm_pred_lines_c) + else: + pred_lines_c = [] + norm_pred_lines_c = [] + + return gt_lines_c, norm_gt_lines_c, gt_cat_list_c, pred_lines_c, norm_pred_lines_c + # return gt_lines, norm_gt_lines, gt_cat_list, pred_lines, norm_pred_lines + + +def match_gt2pred_simple(gt_items, pred_items, line_type, img_name): + + gt_lines, norm_gt_lines, gt_cat_list, pred_lines, norm_pred_lines = get_gt_pred_lines(gt_items, pred_items, line_type) + + match_list = [] + if not norm_gt_lines: # not matched pred should be concatenated + # print("One of the lists is empty. 
Returning an empty gt result.") + # for pred_idx in range(len(norm_pred_lines)): + pred_idx_list = range(len(norm_pred_lines)) + match_list.append({ + 'gt_idx': [""], + 'gt': "", + 'pred_idx': pred_idx_list, + 'pred': ''.join(pred_lines[_] for _ in pred_idx_list), + 'gt_position': [""], + 'pred_position': pred_items[pred_idx_list[0]]['position'][0], # get the first pred's position + 'norm_gt': "", + 'norm_pred': ''.join(norm_pred_lines[_] for _ in pred_idx_list), + 'gt_category_type': "", + 'pred_category_type': get_pred_category_type(pred_idx_list[0], pred_items), # get the first pred's category + 'gt_attribute': [{}], + 'edit': 1, + 'img_id': img_name + }) + return match_list + elif not norm_pred_lines: # not matched gt should be separated + # print("One of the lists is empty. Returning an empty pred result.") + for gt_idx in range(len(norm_gt_lines)): + match_list.append({ + 'gt_idx': [gt_idx], + 'gt': gt_lines[gt_idx], + 'pred_idx': [""], + 'pred': "", + 'gt_position': [gt_items[gt_idx].get('order') if gt_items[gt_idx].get('order') else gt_items[gt_idx].get('position', [""])[0]], + 'pred_position': "", + 'norm_gt': norm_gt_lines[gt_idx], + 'norm_pred': "", + 'gt_category_type': gt_cat_list[gt_idx], + 'pred_category_type': "", + 'gt_attribute': [gt_items[gt_idx].get("attribute", {})], + 'edit': 1, + 'img_id': img_name + }) + return match_list + + cost_matrix = compute_edit_distance_matrix_new(norm_gt_lines, norm_pred_lines) + + row_ind, col_ind = linear_sum_assignment(cost_matrix) + + + for gt_idx in range(len(norm_gt_lines)): + if gt_idx in row_ind: + row_i = list(row_ind).index(gt_idx) + pred_idx = int(col_ind[row_i]) + pred_line = pred_lines[pred_idx] + norm_pred_line = norm_pred_lines[pred_idx] + edit = cost_matrix[gt_idx][pred_idx] + # print('edit_dist', edit) + # if edit > 0.7: + # print('! 
Not match') + else: + # print('No match pred') + pred_idx = "" + pred_line = "" + norm_pred_line = "" + edit = 1 + + match_list.append({ + 'gt_idx': [gt_idx], + 'gt': gt_lines[gt_idx], + 'norm_gt': norm_gt_lines[gt_idx], + 'gt_category_type': gt_cat_list[gt_idx], + 'gt_position': [gt_items[gt_idx].get('order') if gt_items[gt_idx].get('order') else gt_items[gt_idx].get('position', [""])[0]], + 'gt_attribute': [gt_items[gt_idx].get("attribute", {})], + 'pred_idx': [pred_idx], + 'pred': pred_line, + 'norm_pred': norm_pred_line, + 'pred_category_type': get_pred_category_type(pred_idx, pred_items) if pred_idx else "", + 'pred_position': pred_items[pred_idx]['position'][0] if pred_idx else "", + 'edit': edit, + 'img_id': img_name + }) + # print('-'*10) + # [([0,1], 0),(2, 1), (1,2)] --> [0,2,1]/[0,1,2] + + pred_idx_list = [pred_idx for pred_idx in range(len(norm_pred_lines)) if pred_idx not in col_ind] # get not matched preds + if pred_idx_list: # if there are still remaining pred_idx, concatenate all preds + match_list.append({ + 'gt_idx': [""], + 'gt': "", + 'pred_idx': pred_idx_list, + 'pred': ''.join(pred_lines[_] for _ in pred_idx_list), + 'gt_position': [""], + 'pred_position': pred_items[pred_idx_list[0]]['position'][0], # get the first pred's position + 'norm_gt': "", + 'norm_pred': ''.join(norm_pred_lines[_] for _ in pred_idx_list), + 'gt_category_type': "", + 'pred_category_type': get_pred_category_type(pred_idx_list[0], pred_items), # get the first pred's category + 'gt_attribute': [{}], + 'edit': 1, + 'img_id': img_name + }) + return match_list + + +def match_gt2pred_no_split(gt_items, pred_items, line_type, img_name): + # directly concatenate gt and pred by position + gt_lines, norm_gt_lines, gt_cat_list, pred_lines, norm_pred_lines = get_gt_pred_lines(gt_items, pred_items, line_type) + gt_line_with_position = [] + for gt_line, norm_gt_line, gt_item in zip(gt_lines, norm_gt_lines, gt_items): + gt_position = gt_item['order'] if gt_item.get('order') else gt_item.get('position', [""])[0] + if gt_position: + gt_line_with_position.append((gt_position, gt_line, norm_gt_line)) + sorted_gt_lines = sorted(gt_line_with_position, key=lambda x: x[0]) + gt = '\n\n'.join([_[1] for _ in sorted_gt_lines]) + norm_gt = '\n\n'.join([_[2] for _ in sorted_gt_lines]) + pred_line_with_position = [(pred_item['position'], pred_line, pred_norm_line) for pred_line, pred_norm_line, pred_item in zip(pred_lines, norm_pred_lines, pred_items)] + sorted_pred_lines = sorted(pred_line_with_position, key=lambda x: x[0]) + pred = '\n\n'.join([_[1] for _ in sorted_pred_lines]) + norm_pred = '\n\n'.join([_[2] for _ in sorted_pred_lines]) + # edit = Levenshtein.distance(norm_gt, norm_pred)/max(len(norm_gt), len(norm_pred)) + if norm_gt or norm_pred: + return [{ + 'gt_idx': [0], + 'gt': gt, + 'norm_gt': norm_gt, + 'gt_category_type': "text_merge", + 'gt_position': [""], + 'gt_attribute': [{}], + 'pred_idx': [0], + 'pred': pred, + 'norm_pred': norm_pred, + 'pred_category_type': "text_merge", + 'pred_position': "", + # 'edit': edit, + 'img_id': img_name + }] + else: + return [] + + +from scipy.optimize import linear_sum_assignment +# from rapidfuzz.distance import Levenshtein +import Levenshtein +from collections import defaultdict +import copy +import pdb +import numpy as np +import evaluate +from collections import Counter +from Levenshtein import distance as Levenshtein_distance + + +def match_gt2pred_quick(gt_items, pred_items, line_type, img_name): + + gt_lines, norm_gt_lines, gt_cat_list, pred_lines, norm_pred_lines= 
get_gt_pred_lines(gt_items, pred_items, line_type) + all_gt_indices = set(range(len(norm_gt_lines))) + all_pred_indices = set(range(len(norm_pred_lines))) + + if not norm_gt_lines: + match_list = [] + for pred_idx in range(len(norm_pred_lines)): + match_list.append({ + 'gt_idx': [""], + 'gt': "", + 'pred_idx': [pred_idx], + 'pred': pred_lines[pred_idx], + 'gt_position': "", + 'pred_position': pred_items[pred_idx]['position'][0], + 'norm_gt': "", + 'norm_pred': norm_pred_lines[pred_idx], + 'gt_category_type': "", + 'pred_category_type': get_pred_category_type(pred_idx, pred_items), + 'gt_attribute': [{}], + 'edit': 1, + 'img_id': img_name + }) + return match_list + elif not norm_pred_lines: + match_list = [] + for gt_idx in range(len(norm_gt_lines)): + match_list.append({ + 'gt_idx': [gt_idx], + 'gt': gt_lines[gt_idx], + 'pred_idx': [""], + 'pred': "", + 'gt_position': [gt_items[gt_idx].get('order') if gt_items[gt_idx].get('order') else gt_items[gt_idx].get('position', [""])[0]], + 'pred_position': "", + 'norm_gt': norm_gt_lines[gt_idx], + 'norm_pred': "", + 'gt_category_type': gt_cat_list[gt_idx], + 'pred_category_type': "", + 'gt_attribute': [gt_items[gt_idx].get("attribute", {})], + 'edit': 1, + 'img_id': img_name + }) + return match_list + elif len(norm_gt_lines) == 1 and len(norm_pred_lines) == 1: + edit_distance = Levenshtein_distance(norm_gt_lines[0], norm_pred_lines[0]) + normalized_edit_distance = edit_distance / max(len(norm_gt_lines[0]), len(norm_pred_lines[0])) + return [{ + 'gt_idx': [0], + 'gt': gt_lines[0], + 'pred_idx': [0], + 'pred': pred_lines[0], + 'gt_position': [gt_items[0].get('order') if gt_items[0].get('order') else gt_items[0].get('position', [""])[0]], + 'pred_position': pred_items[0]['position'][0], + 'norm_gt': norm_gt_lines[0], + 'norm_pred': norm_pred_lines[0], + 'gt_category_type': gt_cat_list[0], + 'pred_category_type': get_pred_category_type(0, pred_items), + 'gt_attribute': [gt_items[0].get("attribute", {})], + 'edit': normalized_edit_distance, + 'img_id': img_name + }] + + cost_matrix = compute_edit_distance_matrix_new(norm_gt_lines, norm_pred_lines) + + matched_col_idx, row_ind, cost_list = cal_final_match(cost_matrix, norm_gt_lines, norm_pred_lines) + + gt_lens_dict, pred_lens_dict = initialize_indices(norm_gt_lines, norm_pred_lines) + + matches, unmatched_gt_indices, unmatched_pred_indices = process_matches(matched_col_idx, row_ind, cost_list, norm_gt_lines, norm_pred_lines, pred_lines) + + matching_dict = fuzzy_match_unmatched_items(unmatched_gt_indices, norm_gt_lines, norm_pred_lines) + + final_matches = merge_matches(matches, matching_dict) + + recalculate_edit_distances(final_matches, gt_lens_dict, norm_gt_lines, norm_pred_lines) + + converted_results = convert_final_matches(final_matches, norm_gt_lines, norm_pred_lines) + + merged_results = merge_duplicates_add_unmatched(converted_results, norm_gt_lines, norm_pred_lines, gt_lines, pred_lines, all_gt_indices, all_pred_indices) + + for entry in merged_results: + entry['gt_idx'] = [entry['gt_idx']] if not isinstance(entry['gt_idx'], list) else entry['gt_idx'] + entry['pred_idx'] = [entry['pred_idx']] if not isinstance(entry['pred_idx'], list) else entry['pred_idx'] + entry['gt_position'] = [gt_items[_].get('order') if gt_items[_].get('order') else gt_items[_].get('position', [""])[0] for _ in entry['gt_idx']] if entry['gt_idx'] != [""] else [""] + entry['pred_position'] = pred_items[entry['pred_idx'][0]]['position'][0] if entry['pred_idx'] != [""] else "" + entry['gt'] = ''.join([gt_lines[_] for _ in 
entry['gt_idx']]) if entry['gt_idx'] != [""] else "" + entry['pred'] = ''.join([pred_lines[_] for _ in entry['pred_idx']]) if entry['pred_idx'] != [""] else "" + entry['norm_gt'] = ''.join([norm_gt_lines[_] for _ in entry['gt_idx']]) if entry['gt_idx'] != [""] else "" + entry['norm_pred'] = ''.join([norm_pred_lines[_] for _ in entry['pred_idx']]) if entry['pred_idx'] != [""] else "" + + if entry['gt_idx'] != [""]: + ignore_type = ['figure_caption', 'figure_footnote', 'table_caption', 'table_footnote', 'code_algorithm', 'code_algorithm_caption', 'header', 'footer', 'page_footnote', 'page_number', 'equation_caption'] + gt_cagegory_clean = [gt_cat_list[_] for _ in entry['gt_idx'] if gt_cat_list[_] not in ignore_type] + if gt_cagegory_clean: + entry['gt_category_type'] = Counter(gt_cagegory_clean).most_common(1)[0][0] + else: + entry['gt_category_type'] = Counter([gt_cat_list[_] for _ in entry['gt_idx']]).most_common(1)[0][0] + else: + entry['gt_category_type'] = "" + entry['pred_category_type'] = get_pred_category_type(entry['pred_idx'][0], pred_items) if entry['pred_idx'] != [""] else "" + entry['gt_attribute'] = [gt_items[_].get("attribute", {}) for _ in entry['gt_idx']] if entry['gt_idx'] != [""] else [{}] + entry['img_id'] = img_name + + return merged_results + + +def merge_duplicates_add_unmatched(converted_results, norm_gt_lines, norm_pred_lines, gt_lines, pred_lines, all_gt_indices, all_pred_indices): + merged_results = [] + processed_pred = set() + processed_gt = set() + + for entry in converted_results: + pred_idx = tuple(entry['pred_idx']) if isinstance(entry['pred_idx'], list) else (entry['pred_idx'],) + if pred_idx not in processed_pred and pred_idx != ("",): + merged_entry = { + 'gt_idx': [entry['gt_idx']], + 'gt': entry['gt'], + 'pred_idx': entry['pred_idx'], + 'pred': entry['pred'], + 'edit': entry['edit'] + } + for other_entry in converted_results: + other_pred_idx = tuple(other_entry['pred_idx']) if isinstance(other_entry['pred_idx'], list) else (other_entry['pred_idx'],) + if other_pred_idx == pred_idx and other_entry is not entry: + merged_entry['gt_idx'].append(other_entry['gt_idx']) + merged_entry['gt'] += other_entry['gt'] + processed_gt.add(other_entry['gt_idx']) + merged_results.append(merged_entry) + processed_pred.add(pred_idx) + processed_gt.add(entry['gt_idx']) + + for entry in converted_results: + if entry['gt_idx'] not in processed_gt: + merged_results.append(entry) + + for gt_idx in range(len(norm_gt_lines)): + if gt_idx not in processed_gt: + merged_results.append({ + 'gt_idx': [gt_idx], + 'gt': gt_lines[gt_idx], + 'pred_idx': [""], + 'pred': "", + 'edit': 1 + }) + return merged_results + + + + +def formula_format(formula_matches, img_name): + return [ + { + "gt": item["gt"], + "pred": item["pred"], + "img_id": f"{img_name}_{i}" + } + for i, item in enumerate(formula_matches) + ] + + +def merge_lists_with_sublists(main_list, sub_lists): + main_list_final = list(copy.deepcopy(main_list)) + for sub_list in sub_lists: + pop_idx = main_list_final.index(sub_list[0]) + for _ in sub_list: + main_list_final.pop(pop_idx) + main_list_final.insert(pop_idx, sub_list) + return main_list_final + + +def sub_pred_fuzzy_matching(gt, pred): + + min_d = float('inf') + # pos = -1 + + gt_len = len(gt) + pred_len = len(pred) + + if gt_len >= pred_len and pred_len > 0: + for i in range(gt_len - pred_len + 1): + sub = gt[i:i + pred_len] + dist = Levenshtein_distance(sub, pred)/pred_len + if dist < min_d: + min_d = dist + pos = i + + return min_d + else: + return False + +def 
sub_gt_fuzzy_matching(pred, gt): + + min_d = float('inf') + pos = "" + matched_sub = "" + gt_len = len(gt) + pred_len = len(pred) + + if pred_len >= gt_len and gt_len > 0: + for i in range(pred_len - gt_len + 1): + sub = pred[i:i + gt_len] + dist = Levenshtein.distance(sub, gt) /gt_len + if dist < min_d: + min_d = dist + pos = i + matched_sub = sub + return min_d, pos, gt_len, matched_sub + else: + return 1, "", gt_len, "" + + +def get_final_subset(subset_certain, subset_certain_cost): + if not subset_certain or not subset_certain_cost: + return [] + + subset_turple = sorted([(a, b) for a, b in zip(subset_certain, subset_certain_cost)], key=lambda x: x[0][0]) + + group_list = defaultdict(list) + group_idx = 0 + group_list[group_idx].append(subset_turple[0]) + + for item in subset_turple[1:]: + overlap_flag = False + for subset in group_list[group_idx]: + for idx in item[0]: + if idx in subset[0]: + overlap_flag = True + break + if overlap_flag: + break + if overlap_flag: + group_list[group_idx].append(item) + else: + group_idx += 1 + group_list[group_idx].append(item) + + final_subset = [] + for _, group in group_list.items(): + if len(group) == 1: + final_subset.append(group[0][0]) + else: + path_dict = defaultdict(list) + path_idx = 0 + path_dict[path_idx].append(group[0]) + + for subset in group[1:]: + new_path = True + for path_idx_s, path_items in path_dict.items(): + is_dup = False + is_same = False + for path_item in path_items: + if path_item[0] == subset[0]: + is_dup = True + is_same = True + if path_item[1] > subset[1]: + path_dict[path_idx_s].pop(path_dict[path_idx_s].index(path_item)) + path_dict[path_idx_s].append(subset) + else: + for num_1 in path_item[0]: + for num_2 in subset[0]: + if num_1 == num_2: + is_dup = True + if not is_dup: + path_dict[path_idx_s].append(subset) + new_path = False + if is_same: + new_path = False + if new_path: + path_idx = len(path_dict.keys()) + path_dict[path_idx].append(subset) + + saved_cost = float('inf') + saved_subset = [] + for path_idx, path in path_dict.items(): + avg_cost = sum([i[1] for i in path]) / len(path) + if avg_cost < saved_cost: + saved_subset = [i[0] for i in path] + saved_cost = avg_cost + + final_subset.extend(saved_subset) + + return final_subset + +def judge_pred_merge(gt_list, pred_list, threshold=0.6): + if len(pred_list) == 1: + return False, False + + cur_pred = ' '.join(pred_list[:-1]) + merged_pred = ' '.join(pred_list) + + cur_dist = Levenshtein.distance(gt_list[0], cur_pred) / max(len(gt_list[0]), len(cur_pred)) + merged_dist = Levenshtein.distance(gt_list[0], merged_pred) / max(len(gt_list[0]), len(merged_pred)) + + if merged_dist > cur_dist: + return False, False + + cur_fuzzy_dists = [sub_pred_fuzzy_matching(gt_list[0], cur_pred) for cur_pred in pred_list[:-1]] + if any(dist is False or dist > threshold for dist in cur_fuzzy_dists): + return False, False + + add_fuzzy_dist = sub_pred_fuzzy_matching(gt_list[0], pred_list[-1]) + if add_fuzzy_dist is False: + return False, False + + merged_pred_flag = add_fuzzy_dist < threshold + continue_flag = len(merged_pred) <= len(gt_list[0]) + + return merged_pred_flag, continue_flag + +def deal_with_truncated(cost_matrix, norm_gt_lines, norm_pred_lines): + matched_first = np.argwhere(cost_matrix < 0.25) + masked_gt_idx = [i[0] for i in matched_first] + unmasked_gt_idx = [i for i in range(cost_matrix.shape[0]) if i not in masked_gt_idx] + masked_pred_idx = [i[1] for i in matched_first] + unmasked_pred_idx = [i for i in range(cost_matrix.shape[1]) if i not in 
masked_pred_idx] + + merges_gt_dict = {} + merges_pred_dict = {} + merged_gt_subsets = [] + + for gt_idx in unmasked_gt_idx: + check_merge_subset = [] + merged_dist = [] + + for pred_idx in unmasked_pred_idx: + step = 1 + merged_pred = [norm_pred_lines[pred_idx]] + + while True: + if pred_idx + step in masked_pred_idx or pred_idx + step >= len(norm_pred_lines): + break + else: + merged_pred.append(norm_pred_lines[pred_idx + step]) + merged_pred_flag, continue_flag = judge_pred_merge([norm_gt_lines[gt_idx]], merged_pred) + if not merged_pred_flag: + break + else: + step += 1 + if not continue_flag: + break + + check_merge_subset.append(list(range(pred_idx, pred_idx + step))) + matched_line = ' '.join([norm_pred_lines[i] for i in range(pred_idx, pred_idx + step)]) + dist = Levenshtein_distance(norm_gt_lines[gt_idx], matched_line) / max(len(matched_line), len(norm_gt_lines[gt_idx])) + merged_dist.append(dist) + + if not merged_dist: + subset_certain = [] + min_cost_idx = "" + min_cost = float('inf') + else: + min_cost = min(merged_dist) + min_cost_idx = merged_dist.index(min_cost) + subset_certain = check_merge_subset[min_cost_idx] + + merges_gt_dict[gt_idx] = { + 'merge_subset': check_merge_subset, + 'merged_cost': merged_dist, + 'min_cost_idx': min_cost_idx, + 'subset_certain': subset_certain, + 'min_cost': min_cost + } + + subset_certain = [merges_gt_dict[gt_idx]['subset_certain'] for gt_idx in unmasked_gt_idx if merges_gt_dict[gt_idx]['subset_certain']] + subset_certain_cost = [merges_gt_dict[gt_idx]['min_cost'] for gt_idx in unmasked_gt_idx if merges_gt_dict[gt_idx]['subset_certain']] + + subset_certain_final = get_final_subset(subset_certain, subset_certain_cost) + + if not subset_certain_final: + return cost_matrix, norm_pred_lines, range(len(norm_pred_lines)) + + final_pred_idx_list = merge_lists_with_sublists(range(len(norm_pred_lines)), subset_certain_final) + final_norm_pred_lines = [' '.join(norm_pred_lines[idx_list[0]:idx_list[-1]+1]) if isinstance(idx_list, list) else norm_pred_lines[idx_list] for idx_list in final_pred_idx_list] + + new_cost_matrix = compute_edit_distance_matrix_new(norm_gt_lines, final_norm_pred_lines) + + return new_cost_matrix, final_norm_pred_lines, final_pred_idx_list + +def cal_move_dist(gt, pred): + assert len(gt) == len(pred), 'Not right length' + step = 0 + for i, gt_c in enumerate(gt): + if gt_c != pred[i]: + step += abs(i - pred.index(gt_c)) + pred[i], pred[pred.index(gt_c)] = pred[pred.index(gt_c)], pred[i] + return step / len(gt) + +def cal_final_match(cost_matrix, norm_gt_lines, norm_pred_lines): + min_indice = cost_matrix.argmax(axis=1) + + new_cost_matrix, final_norm_pred_lines, final_pred_idx_list = deal_with_truncated(cost_matrix, norm_gt_lines, norm_pred_lines) + + row_ind, col_ind = linear_sum_assignment(new_cost_matrix) + + cost_list = [new_cost_matrix[r][c] for r, c in zip(row_ind, col_ind)] + matched_col_idx = [final_pred_idx_list[i] for i in col_ind] + + return matched_col_idx, row_ind, cost_list + +def initialize_indices(norm_gt_lines, norm_pred_lines): + gt_lens_dict = {idx: len(gt_line) for idx, gt_line in enumerate(norm_gt_lines)} + pred_lens_dict = {idx: len(pred_line) for idx, pred_line in enumerate(norm_pred_lines)} + return gt_lens_dict, pred_lens_dict + +def process_matches(matched_col_idx, row_ind, cost_list, norm_gt_lines, norm_pred_lines, pred_lines): + matches = {} + unmatched_gt_indices = [] + unmatched_pred_indices = [] + + for i in range(len(norm_gt_lines)): + if i in row_ind: + idx = list(row_ind).index(i) + pred_idx = 
matched_col_idx[idx] + + if pred_idx is None or (isinstance(pred_idx, list) and None in pred_idx): + unmatched_pred_indices.append(pred_idx) + continue + + if isinstance(pred_idx, list): + pred_line = ' | '.join(norm_pred_lines[pred_idx[0]:pred_idx[-1]+1]) + ori_pred_line = ' | '.join(pred_lines[pred_idx[0]:pred_idx[-1]+1]) + matched_pred_indices_range = list(range(pred_idx[0], pred_idx[-1]+1)) + else: + pred_line = norm_pred_lines[pred_idx] + ori_pred_line = pred_lines[pred_idx] + matched_pred_indices_range = [pred_idx] + + edit = cost_list[idx] + + if edit > 0.7: + unmatched_pred_indices.extend(matched_pred_indices_range) + unmatched_gt_indices.append(i) + else: + matches[i] = { + 'pred_indices': matched_pred_indices_range, + 'edit_distance': edit, + } + for matched_pred_idx in matched_pred_indices_range: + if matched_pred_idx in unmatched_pred_indices: + unmatched_pred_indices.remove(matched_pred_idx) + else: + unmatched_gt_indices.append(i) + + return matches, unmatched_gt_indices, unmatched_pred_indices + +def fuzzy_match_unmatched_items(unmatched_gt_indices, norm_gt_lines, norm_pred_lines): + matching_dict = {} + + for pred_idx, pred_content in enumerate(norm_pred_lines): + if isinstance(pred_idx, list): + continue + + matching_indices = [] + + for unmatched_gt_idx in unmatched_gt_indices: + gt_content = norm_gt_lines[unmatched_gt_idx] + cur_fuzzy_dist_unmatch, cur_pos, gt_lens, matched_field = sub_gt_fuzzy_matching(pred_content, gt_content) + if cur_fuzzy_dist_unmatch < 0.4: + matching_indices.append(unmatched_gt_idx) + + if matching_indices: + matching_dict[pred_idx] = matching_indices + + return matching_dict + +def merge_matches(matches, matching_dict): + final_matches = {} + processed_gt_indices = set() + + for gt_idx, match_info in matches.items(): + pred_indices = match_info['pred_indices'] + edit_distance = match_info['edit_distance'] + + pred_key = tuple(sorted(pred_indices)) + + if pred_key in final_matches: + if gt_idx not in processed_gt_indices: + final_matches[pred_key]['gt_indices'].append(gt_idx) + processed_gt_indices.add(gt_idx) + else: + final_matches[pred_key] = { + 'gt_indices': [gt_idx], + 'edit_distance': edit_distance + } + processed_gt_indices.add(gt_idx) + + for pred_idx, gt_indices in matching_dict.items(): + pred_key = (pred_idx,) if not isinstance(pred_idx, (list, tuple)) else tuple(sorted(pred_idx)) + + if pred_key in final_matches: + for gt_idx in gt_indices: + if gt_idx not in processed_gt_indices: + final_matches[pred_key]['gt_indices'].append(gt_idx) + processed_gt_indices.add(gt_idx) + else: + final_matches[pred_key] = { + 'gt_indices': [gt_idx for gt_idx in gt_indices if gt_idx not in processed_gt_indices], + 'edit_distance': None + } + processed_gt_indices.update(final_matches[pred_key]['gt_indices']) + + return final_matches + + + +def recalculate_edit_distances(final_matches, gt_lens_dict, norm_gt_lines, norm_pred_lines): + for pred_key, info in final_matches.items(): + gt_indices = sorted(set(info['gt_indices'])) + + if not gt_indices: + info['edit_distance'] = 1 + continue + + if len(gt_indices) > 1: + merged_gt_content = ''.join(norm_gt_lines[gt_idx] for gt_idx in gt_indices) + pred_content = norm_pred_lines[pred_key[0]] if isinstance(pred_key[0], int) else '' + + try: + edit_distance = Levenshtein_distance(merged_gt_content, pred_content) + normalized_edit_distance = edit_distance / max(len(merged_gt_content), len(pred_content)) + except ZeroDivisionError: + normalized_edit_distance = 1 + + info['edit_distance'] = normalized_edit_distance + 
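+        # single-gt branch below: join the prediction fragments matched to this gt and
+        # recompute the normalized Levenshtein distance, edit = dist / max(len(gt), len(pred)),
+        # the same normalization used in compute_edit_distance_matrix_new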
else: + gt_idx = gt_indices[0] + pred_content = ' '.join(norm_pred_lines[pred_idx] for pred_idx in pred_key if isinstance(pred_idx, int)) + + try: + edit_distance = Levenshtein_distance(norm_gt_lines[gt_idx], pred_content) + normalized_edit_distance = edit_distance / max(len(norm_gt_lines[gt_idx]), len(pred_content)) + except ZeroDivisionError: + normalized_edit_distance = 1 + + info['edit_distance'] = normalized_edit_distance + info['pred_content'] = pred_content + + +def convert_final_matches(final_matches, norm_gt_lines, norm_pred_lines): + converted_results = [] + + all_gt_indices = set(range(len(norm_gt_lines))) + all_pred_indices = set(range(len(norm_pred_lines))) + + for pred_key, info in final_matches.items(): + pred_content = ' '.join(norm_pred_lines[pred_idx] for pred_idx in pred_key if isinstance(pred_idx, int)) + + for gt_idx in sorted(set(info['gt_indices'])): + result_entry = { + 'gt_idx': int(gt_idx), + 'gt': norm_gt_lines[gt_idx], + 'pred_idx': list(pred_key), + 'pred': pred_content, + 'edit': info['edit_distance'] + } + converted_results.append(result_entry) + + matched_gt_indices = set().union(*[set(info['gt_indices']) for info in final_matches.values()]) + unmatched_gt_indices = all_gt_indices - matched_gt_indices + matched_pred_indices = set(idx for pred_key in final_matches.keys() for idx in pred_key if isinstance(idx, int)) + unmatched_pred_indices = all_pred_indices - matched_pred_indices + + if unmatched_pred_indices: + if unmatched_gt_indices: + distance_matrix = [ + [Levenshtein_distance(norm_gt_lines[gt_idx], norm_pred_lines[pred_idx]) for pred_idx in unmatched_pred_indices] + for gt_idx in unmatched_gt_indices + ] + + row_ind, col_ind = linear_sum_assignment(distance_matrix) + + for i, j in zip(row_ind, col_ind): + gt_idx = list(unmatched_gt_indices)[i] + pred_idx = list(unmatched_pred_indices)[j] + result_entry = { + 'gt_idx': int(gt_idx), + 'gt': norm_gt_lines[gt_idx], + 'pred_idx': [pred_idx], + 'pred': norm_pred_lines[pred_idx], + 'edit': 1 + } + converted_results.append(result_entry) + + matched_gt_indices.update(list(unmatched_gt_indices)[i] for i in row_ind) + else: + result_entry = { + 'gt_idx': "", + 'gt': '', + 'pred_idx': list(unmatched_pred_indices), + 'pred': ' '.join(norm_pred_lines[pred_idx] for pred_idx in unmatched_pred_indices), + 'edit': 1 + } + converted_results.append(result_entry) + else: + for gt_idx in unmatched_gt_indices: + result_entry = { + 'gt_idx': int(gt_idx), + 'gt': norm_gt_lines[gt_idx], + 'pred_idx': "", + 'pred': '', + 'edit': 1 + } + converted_results.append(result_entry) + + return converted_results + +import json + +def read_md_file(filepath): + with open(filepath, 'r', encoding='utf-8') as file: + content = file.read() + + return content + +def save_paired_result(preds, gts, save_path): + save_result = [] + formula_id = 0 + for gt, pred in zip(gts, preds): + save_result.append({ + "gt": gt, + "pred": pred, + "img_id": formula_id + }) + formula_id += 1 + with open(save_path, 'w', encoding='utf-8') as f: + json.dump(save_result, f, indent=4, ensure_ascii=False) + + +import matplotlib.pyplot as plt +import numpy as np +import os +import re +import matplotlib.font_manager as fm +font = fm.FontProperties(fname=r'font/SimHei.ttf') + + +def print_aligned_dict(data): + # Find the maximum length of all keys + max_key_length = max(len(key) for key in data['testcase1']) + + # Print header + print(f"{' ' * (max_key_length + 4)}", end="") + for key in data: + print(f"{key:>{max_key_length}}", end="") + print() + + # Print dictionary 
content
+    for subkey in data['testcase1']:
+        print(f"{subkey:<{max_key_length + 4}}", end="")
+        for key in data:
+            print(f"{data[key][subkey]:>{max_key_length}}", end="")
+        print()
+def create_dict_from_folders(directory):
+    body = {}
+    for folder_name in os.listdir(directory):
+        folder_path = os.path.join(directory, folder_name)
+        if os.path.isdir(folder_path):
+            body[folder_name] = {}
+    return body
+
+
+def create_radar_chart(df, title, filename):
+    labels = df.columns
+
+    # Calculate angles
+    angles = np.linspace(0, 2 * np.pi, len(labels), endpoint=False).tolist()
+    angles += angles[:1]
+
+    # Initialize radar chart
+    fig, ax = plt.subplots(figsize=(10, 6), subplot_kw=dict(polar=True), dpi=200)
+    # ax.spines['polar'].set_visible(False)
+
+    # Draw radar chart for each dataset
+    for index, row in df.iterrows():
+        values = row.tolist()
+        values += values[:1]
+        ax.fill(angles, values, alpha=0.1)
+        ax.plot(angles, values, label=index)
+
+    # Add percentage labels next to each data point
+    for angle, value in zip(angles, values):
+        ax.text(angle, value, '{:.1%}'.format(value), ha='center', va='center', fontsize=7, alpha=0.7)
+
+    # Set labels
+    ax.set_yticklabels([])
+    ax.set_xticks(angles[:-1])
+    ax.set_xticklabels(labels, fontproperties=font)
+    ax.spines['polar'].set_visible(False)  # Hide the outermost circle
+    ax.grid(False)
+    for j in np.arange(0, 1.2, 0.2):
+        ax.plot(angles, len(values) * [j], '-.', lw=0.5, color='black', alpha=0.5)
+    for j in range(len(values)):
+        ax.plot([angles[j], angles[j]], [0, 1], '-.', lw=0.5, color='black', alpha=0.5)
+
+    # Add title and legend
+    plt.legend(loc='upper right', bbox_to_anchor=(0.1, 0.1))
+
+    ax.tick_params(pad=30)
+    ax.set_theta_zero_location('N')
+    # Save chart to file
+    plt.savefig(filename)
+
+# The function is from https://github.com/intsig-textin/markdown_tester
+def markdown_to_html(markdown_table):
+    rows = [row.strip() for row in markdown_table.strip().split('\n')]
+    num_columns = len(rows[0].split('|')) - 2
+
+    html_table = '<table>\n  <thead>\n    <tr>\n'
+
+    header_cells = [cell.strip() for cell in rows[0].split('|')[1:-1]]
+    for cell in header_cells:
+        html_table += f'      <th>{cell}</th>\n'
+    html_table += '    </tr>\n  </thead>\n  <tbody>\n'
+
+    for row in rows[2:]:
+        cells = [cell.strip() for cell in row.split('|')[1:-1]]
+        html_table += '    <tr>\n'
+        for cell in cells:
+            html_table += f'      <td>{cell}</td>\n'
+        html_table += '    </tr>\n'
+
+    html_table += '  </tbody>\n</table>\n'
+    return html_table
+def convert_markdown_to_html(self, markdown_content, md_type):
+    # Define a regex pattern to find Markdown tables with newlines
+    markdown_content = markdown_content.replace('\r', '')
+    pattern = re.compile(r'\|\s*.*?\s*\|\n', re.DOTALL)
+
+    # Find all matches in the Markdown content
+    matches = pattern.findall(markdown_content)
+    for match in matches:
+        html_table = markdown_to_html(match)
+        markdown_content = markdown_content.replace(match, html_table, 1)  # Only replace the first occurrence
+    res_html = convert_table(replace_table_with_placeholder(markdown_content))
+
+    return res_html
+def convert_table_str(s):
+    s = re.sub(r'','',s)
+    s = re.sub(r'','',s)
+    # s = re.sub(r'
',lambda x:f'',s) + # s = re.sub(r'',lambda x:f'',s) + res = '' + res += '\n\n' + temp_item = '' + for c in s: + temp_item += c + if c == '>' and not re.search(r'\$',temp_item): + res += temp_item+'\n' + temp_item = '' + return res+'\n' +def merge_table(md): + table_temp = '' + for line in md: + table_temp += line + return convert_table_str(table_temp) +def find_md_table_mode(line): + if re.search(r'-*?:',line) or re.search(r'---',line) or re.search(r':-*?',line): + return True + return False +def delete_table_and_body(input_list): + res = [] + for line in input_list: + if not re.search(r'',line): + res.append(line) + return res +def merge_tables(input_str): + # Delete HTML comments + input_str = re.sub(r'', '', input_str) + + # Use regex to find each block + table_blocks = re.findall(r'
[\s\S]*?
', input_str) + + # Process each block, replace ') + final_tr = delete_table_and_body(block_lines) + if len(final_tr) > 2: + output_lines.extend(final_tr) # Ignore
with + output_lines = [] + for block in table_blocks: + block_lines = block.split('\n') + for i, line in enumerate(block_lines): + if '' in line: + block_lines[i] = line.replace('', '').replace('', '
and
tags, keep only table content + + # Rejoin the processed strings + merged_output = '\n{}\n
'.format('\n'.join(output_lines)) + + return "\n\n" + merged_output + "\n\n" + +def replace_table_with_placeholder(input_string): + lines = input_string.split('\n') + output_lines = [] + + in_table_block = False + temp_block = "" + last_line = "" + + org_table_list = [] + in_org_table = False + + for idx, line in enumerate(lines): + # if not in_org_table: + # if "" not in last_line and in_table_block == False and temp_block != "": + # output_lines.append(merge_tables(temp_block)) + # temp_block = "" + if "
" in line: + # if "
" not in last_line: + temp_block += "\n" + last_line + if "
" in last_line: + if "" not in line: + in_table_block = False + output_lines.append(merge_tables(temp_block)) + temp_block = "" + else: + output_lines.append(last_line) + + last_line = line + # else: + # org_table_list.append(line) + # if "" in last_line: + temp_block += "\n" + last_line + output_lines.append(merge_tables(temp_block)) + else: + output_lines.append(last_line) + # if "
" in last_line: + # output_lines.append(merge_tables(temp_block)) + + return '\n'.join(output_lines) + +def convert_table(input_str): + # Replace + output_str = input_str.replace("
", "
") + + # Replace
+ output_str = output_str.replace("", "") + + return output_str + +def convert_markdown_to_html(markdown_content): + # Define a regex pattern to find Markdown tables with newlines + markdown_content = markdown_content.replace('\r', '')+'\n' + pattern = re.compile(r'\|\s*.*?\s*\|\n', re.DOTALL) + + # Find all matches in the Markdown content + matches = pattern.findall(markdown_content) + + for match in matches: + html_table = markdown_to_html(match) + markdown_content = markdown_content.replace(match, html_table, 1) # Only replace the first occurrence + + res_html = convert_table(replace_table_with_placeholder(markdown_content)) + + return res_html \ No newline at end of file diff --git a/vlmeval/dataset/__init__.py b/vlmeval/dataset/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2d6532b812c45873cf02f15dfd958aa463f79c74 --- /dev/null +++ b/vlmeval/dataset/__init__.py @@ -0,0 +1,259 @@ +import warnings + +from .image_base import img_root_map, ImageBaseDataset +from .image_caption import ImageCaptionDataset +from .image_yorn import ImageYORNDataset +from .image_mcq import ( + ImageMCQDataset, MMMUDataset, CustomMCQDataset, MUIRDataset, GMAIMMBenchDataset, MMERealWorld, HRBenchDataset, + NaturalBenchDataset, WeMath, MMMUProDataset, VMCBenchDataset +) +from .image_mt import MMDUDataset +from .image_vqa import ( + ImageVQADataset, MathVision, OCRBench, MathVista, LLaVABench, MMVet, MTVQADataset, TableVQABench, + CustomVQADataset, CRPE, MathVerse, OlympiadBench, QSpatial, VizWiz, MMNIAH, LogicVista, MME_CoT +) + +from .image_ccocr import CCOCRDataset +from .image_shortqa import ImageShortQADataset +from .text_mcq import CustomTextMCQDataset, TextMCQDataset + +from .vcr import VCRDataset +from .mmlongbench import MMLongBench +from .dude import DUDE +from .slidevqa import SlideVQA +from .vl_rewardbench import VLRewardBench +from .vlm2bench import VLM2Bench + +from .mmdocbench import MMDocBench + +from .mmbench_video import MMBenchVideo +from .videomme import VideoMME +from .mvbench import MVBench, MVBench_MP4 +from .tamperbench import MVTamperBench +from .miabench import MIABench +from .mlvu import MLVU, MLVU_MCQ, MLVU_OpenEnded +from .tempcompass import TempCompass, TempCompass_Captioning, TempCompass_MCQ, TempCompass_YorN +from .longvideobench import LongVideoBench +from .video_concat_dataset import ConcatVideoDataset +from .mmgenbench import MMGenBench +from .cgbench import CGBench_MCQ_Grounding_Mini, CGBench_OpenEnded_Mini, CGBench_MCQ_Grounding, CGBench_OpenEnded +from .megabench import MEGABench +from .moviechat1k import MovieChat1k +from .vdc import VDC + +from .worldsense import WorldSense +from .qbench_video import QBench_Video, QBench_Video_MCQ, QBench_Video_VQA + +from .miabench import MIABench +from .cmmmu import CMMMU +from .emma import EMMADataset +from .wildvision import WildVision +from .mmmath import MMMath +from .dynamath import Dynamath +from .creation import CreationMMBenchDataset +from .mmalignbench import MMAlignBench +from .utils import * +from .video_dataset_config import * +from ..smp import * +from .Omnidocbench.omnidocbench import OmniDocBench +from .moat import MOAT + + +class ConcatDataset(ImageBaseDataset): + # This dataset takes multiple dataset names as input and aggregate them into a single dataset. 
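+    # Illustrative sketch (names taken from DATASET_SETS and build_dataset below, not an extra API):
+    #     from vlmeval.dataset import build_dataset
+    #     data = build_dataset('MMMB')   # -> ConcatDataset over MMMB_ar ... MMMB_tr
+    #     msg = data.build_prompt(0)     # dispatched to the matching sub-dataset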
+ # Each single dataset should not have a field named `SUB_DATASET` + + DATASET_SETS = { + 'MMMB': ['MMMB_ar', 'MMMB_cn', 'MMMB_en', 'MMMB_pt', 'MMMB_ru', 'MMMB_tr'], + 'MTL_MMBench_DEV': [ + 'MMBench_dev_ar', 'MMBench_dev_cn', 'MMBench_dev_en', + 'MMBench_dev_pt', 'MMBench_dev_ru', 'MMBench_dev_tr' + ], + } + + def __init__(self, dataset): + datasets = self.DATASET_SETS[dataset] + self.dataset_map = {} + # The name of the compliation + self.dataset_name = dataset + self.datasets = datasets + for dname in datasets: + dataset = build_dataset(dname) + assert dataset is not None, dataset + self.dataset_map[dname] = dataset + TYPES = [x.TYPE for x in self.dataset_map.values()] + MODALITIES = [x.MODALITY for x in self.dataset_map.values()] + assert np.all([x == TYPES[0] for x in TYPES]), (datasets, TYPES) + assert np.all([x == MODALITIES[0] for x in MODALITIES]), (datasets, MODALITIES) + self.TYPE = TYPES[0] + self.MODALITY = MODALITIES[0] + data_all = [] + for dname in datasets: + data = self.dataset_map[dname].data + data['SUB_DATASET'] = [dname] * len(data) + data_new = localize_df(data, dname, nproc=16) + data_all.append(data_new) + + data = pd.concat(data_all) + data['original_index'] = data.pop('index') + data['index'] = np.arange(len(data)) + self.data = data + + def build_prompt(self, line): + if isinstance(line, int): + line = self.data.iloc[line] + idx = line['original_index'] + dname = line['SUB_DATASET'] + org_data = self.dataset_map[dname].data + org_line = cp.deepcopy(org_data[org_data['index'] == idx]).iloc[0] + return self.dataset_map[dname].build_prompt(org_line) + + def dump_image(self, line): + # Assert all images are pre-dumped + assert 'image' not in line + assert 'image_path' in line + tgt_path = toliststr(line['image_path']) + return tgt_path + + @classmethod + def supported_datasets(cls): + return list(cls.DATASET_SETS) + + def evaluate(self, eval_file, **judge_kwargs): + suffix = eval_file.split('.')[-1] + # First, split the eval_file by dataset + data_all = load(eval_file) + for dname in self.datasets: + tgt = eval_file.replace(self.dataset_name, dname) + data_sub = data_all[data_all['SUB_DATASET'] == dname] + data_sub.pop('index') + data_sub['index'] = data_sub.pop('original_index') + data_sub.pop('SUB_DATASET') + dump(data_sub, tgt) + # Then, evaluate each dataset separately + results_all = [] + for dname in self.datasets: + tgt = eval_file.replace(self.dataset_name, dname) + res = self.dataset_map[dname].evaluate(tgt, **judge_kwargs) + assert isinstance(res, pd.DataFrame) + res['DATASET'] = [dname] * len(res) + results_all.append(res) + result = pd.concat(results_all) + score_file = eval_file.replace(f'.{suffix}', '_acc.csv') + dump(result, score_file) + return result + + +# Add new supported dataset class here +IMAGE_DATASET = [ + ImageCaptionDataset, ImageYORNDataset, ImageMCQDataset, ImageVQADataset, + MathVision, MMMUDataset, OCRBench, MathVista, LLaVABench, MMVet, + MTVQADataset, TableVQABench, MMLongBench, VCRDataset, MMDUDataset, DUDE, + SlideVQA, MUIRDataset, CCOCRDataset, GMAIMMBenchDataset, MMERealWorld, + HRBenchDataset, CRPE, MathVerse, NaturalBenchDataset, MIABench, + OlympiadBench, WildVision, MMMath, QSpatial, Dynamath, MMGenBench, VizWiz, + MMNIAH, CMMMU, VLRewardBench, WeMath, LogicVista, MMMUProDataset, + CreationMMBenchDataset, ImageShortQADataset, MMAlignBench, OmniDocBench, + VLM2Bench, VMCBenchDataset, EMMADataset, MME_CoT, MOAT, MMDocBench +] + + +VIDEO_DATASET = [ + MMBenchVideo, VideoMME, MVBench, MVBench_MP4, MVTamperBench, LongVideoBench, 
WorldSense, VDC, MovieChat1k, + MLVU, MLVU_MCQ, MLVU_OpenEnded, + TempCompass, TempCompass_MCQ, TempCompass_Captioning, TempCompass_YorN, + CGBench_MCQ_Grounding_Mini, CGBench_OpenEnded_Mini, CGBench_MCQ_Grounding, CGBench_OpenEnded, + MEGABench, WorldSense, QBench_Video, QBench_Video_MCQ, QBench_Video_VQA +] + +TEXT_DATASET = [ + TextMCQDataset +] + +CUSTOM_DATASET = [ + CustomMCQDataset, CustomVQADataset, CustomTextMCQDataset +] + +DATASET_COLLECTION = [ConcatDataset, ConcatVideoDataset] + +DATASET_CLASSES = IMAGE_DATASET + VIDEO_DATASET + TEXT_DATASET + CUSTOM_DATASET + DATASET_COLLECTION # noqa: E501 +SUPPORTED_DATASETS = [] +for DATASET_CLS in DATASET_CLASSES: + SUPPORTED_DATASETS.extend(DATASET_CLS.supported_datasets()) + + +def DATASET_TYPE(dataset, *, default: str = 'MCQ') -> str: + for cls in DATASET_CLASSES: + if dataset in cls.supported_datasets(): + if hasattr(cls, 'TYPE'): + return cls.TYPE + # Have to add specific routine to handle ConcatDataset + if dataset in ConcatDataset.DATASET_SETS: + dataset_list = ConcatDataset.DATASET_SETS[dataset] + TYPES = [DATASET_TYPE(dname) for dname in dataset_list] + assert np.all([x == TYPES[0] for x in TYPES]), (dataset_list, TYPES) + return TYPES[0] + + if 'openended' in dataset.lower(): + return 'VQA' + warnings.warn(f'Dataset {dataset} is a custom one and not annotated as `openended`, will treat as {default}. ') # noqa: E501 + return default + + +def DATASET_MODALITY(dataset, *, default: str = 'IMAGE') -> str: + if dataset is None: + warnings.warn(f'Dataset is not specified, will treat modality as {default}. ') + return default + for cls in DATASET_CLASSES: + if dataset in cls.supported_datasets(): + if hasattr(cls, 'MODALITY'): + return cls.MODALITY + # Have to add specific routine to handle ConcatDataset + if dataset in ConcatDataset.DATASET_SETS: + dataset_list = ConcatDataset.DATASET_SETS[dataset] + MODALITIES = [DATASET_MODALITY(dname) for dname in dataset_list] + assert np.all([x == MODALITIES[0] for x in MODALITIES]), (dataset_list, MODALITIES) + return MODALITIES[0] + + if 'VIDEO' in dataset.lower(): + return 'VIDEO' + elif 'IMAGE' in dataset.lower(): + return 'IMAGE' + warnings.warn(f'Dataset {dataset} is a custom one, will treat modality as {default}. ') + return default + + +def build_dataset(dataset_name, **kwargs): + for cls in DATASET_CLASSES: + if dataset_name in supported_video_datasets: + return supported_video_datasets[dataset_name](**kwargs) + elif dataset_name in cls.supported_datasets(): + return cls(dataset=dataset_name, **kwargs) + + warnings.warn(f'Dataset {dataset_name} is not officially supported. ') + + data_file = osp.join(LMUDataRoot(), f'{dataset_name}.tsv') + if not osp.exists(data_file): + warnings.warn(f'Data file {data_file} does not exist. Dataset building failed. ') + return None + + data = load(data_file) + if 'question' not in [x.lower() for x in data.columns]: + warnings.warn(f'Data file {data_file} does not have a `question` column. Dataset building failed. ') + return None + + if 'A' in data and 'B' in data: + if 'image' in data or 'image_path' in data: + warnings.warn(f'Will assume unsupported dataset {dataset_name} as a Custom MCQ dataset. ') + return CustomMCQDataset(dataset=dataset_name, **kwargs) + else: + warnings.warn(f'Will assume unsupported dataset {dataset_name} as a Custom Text MCQ dataset. ') + return CustomTextMCQDataset(dataset=dataset_name, **kwargs) + else: + warnings.warn(f'Will assume unsupported dataset {dataset_name} as a Custom VQA dataset. 
') + return CustomVQADataset(dataset=dataset_name, **kwargs) + + +__all__ = [ + 'build_dataset', 'img_root_map', 'build_judge', 'extract_answer_from_item', 'prefetch_answer', 'DEBUG_MESSAGE' +] + [cls.__name__ for cls in DATASET_CLASSES] diff --git a/vlmeval/dataset/cgbench.py b/vlmeval/dataset/cgbench.py new file mode 100644 index 0000000000000000000000000000000000000000..172cdbb3614a7a279cac73e533c3b72e47fca2f3 --- /dev/null +++ b/vlmeval/dataset/cgbench.py @@ -0,0 +1,1760 @@ +from huggingface_hub import snapshot_download +from ..smp import * +from .video_base import VideoBaseDataset +from .utils import build_judge, DEBUG_MESSAGE +from .utils.cgbench import * +from ..utils import track_progress_rich + + +class CGBench_MCQ_Grounding_Mini(VideoBaseDataset): + + dataset = "CG-Bench_MCQ_Grounding_Mini" + + TYPE = "Video-MCQ-Grounding" + + MD5 = "54ed3e90a51a6fb375c92b319a715f72" + + SYS = { + "long_acc": ( + "You will be provided with sampled frames from a video, along with a " + "multiple-choice question that includes a question and several answer options.\n" + "Your task is to analyze the provided frames, infer the most plausible " + "answer based on the visual information.\n" + "If the video does not provide enough information, infer the answer based " + "on the options available and still provide a result. " + "Therefore, In all cases, an answer must be given.\n" + "Only output the answer in the following format:\n\n" + '```json\n{"result": "option"}\n```\n\n' + 'The "option" is the uppercase letter corresponding to your answer.\n\n' + ), + "clue_acc": ( + "You will be provided with sampled frames from a video, along with a " + "multiple-choice question that includes a question and several answer options.\n" + "Your task is to analyze the provided frames, infer the most plausible " + "answer based on the visual information.\n" + "If the video does not provide enough information, infer the answer based " + "on the options available and still provide a result. " + "Therefore, In all cases, an answer must be given.\n" + "Only output the answer in the following format:\n\n" + '```json\n{"result": "option"}\n```\n\n' + "The 'option' is the uppercase letter corresponding to your answer.\n\n" + ), + "miou": ( + "You will be provided with uniformly sampled frames from a video and their " + "timestamps, along with a multiple-choice question that includes a question " + "and several answer options.\n" + "Your task is to determine in which intervals the 'clue intervals' exist " + "that contain visual information needed to answer the question.\n" + "Only output the answer in the following format:\n\n" + '```json\n{"result": [[start1, end1], [start2, end2], ...]}\n```\n\n' + "In this output format, each 'start' and 'end' represents the beginning and " + "end of an interval in seconds where relevant clues can be found.\n" + "You must provide at least one interval and at most five intervals. 
" + "Intervals exceeding five will NOT be considered valid.\n" + ), + "miou_wo_frame_time": ( + "You will be provided with uniformly sampled frames from a video, along " + "with a multiple-choice question that includes a question and several " + "answer options.\n" + "Your task is to determine in which intervals the 'clue intervals' exist " + "that contain visual information needed to answer the question.\n" + "Only output the answer in the following format:\n\n" + '```json\n{"result": [[start1, end1], [start2, end2], ...]}\n```\n\n' + 'In this output format, each "start" and "end" represents the start and ' + "end of the video where the relevant clue can be found in the form of a " + "floating point number between 0 and 1, where 0 represents the start time " + "of the video and 1 represents the end time of the video.\n" + "You must provide at least one interval and at most five intervals. " + "Intervals exceeding five will NOT be considered valid.\n" + ), + } + + def __init__( + self, + dataset="CG-Bench_MCQ_Grounding_Mini", + use_subtitle=False, + use_subtitle_time=False, + use_frame_time=False, + nframe=0, + fps=-1, + ): + super().__init__(dataset=dataset, nframe=nframe, fps=fps) + self.use_subtitle = use_subtitle + self.use_subtitle_time = use_subtitle_time + self.use_frame_time = use_frame_time + self.dataset_name = dataset + lmu_root = LMUDataRoot() + self.clue_frame_root = osp.join(lmu_root, "clue_images", dataset) + + @classmethod + def supported_datasets(cls): + return ["CG-Bench_MCQ_Grounding_Mini"] + + def clue_frame_paths(self, qid, num_frames=8): + frame_root = osp.join(self.clue_frame_root, qid) + os.makedirs(frame_root, exist_ok=True) + return [osp.join(frame_root, self.frame_tmpl.format(i, num_frames)) for i in range(1, num_frames + 1)] + + def clue_frame_paths_fps(self, qid, num_frames=8, fps=-1): + frame_root = osp.join(self.clue_frame_root, qid) + os.makedirs(frame_root, exist_ok=True) + return [osp.join(frame_root, self.frame_tmpl_fps.format(i, num_frames, fps)) for i in range(1, num_frames + 1)] + + def get_subtitles(self, subtitle_path, frame_indices=None, fps=None, sub_time=False): + + subtitles = [] + + srt_path = osp.join(self.data_root, subtitle_path) + assert osp.exists(srt_path) + import pysubs2 + + subs = pysubs2.load(srt_path, encoding="utf-8") + if not frame_indices: + for sub in subs: + sub_text = sub.text.replace("\\N", " ") + if sub_time: + start_time = milliseconds_to_seconds(sub.start) + end_time = milliseconds_to_seconds(sub.end) + sub_text = f"[{start_time}, {end_time}] {sub_text}" + if sub_text.strip() and sub_text not in subtitles: + subtitles.append(sub_text) + else: + for selected_frame_id in frame_indices: + cur_time = pysubs2.make_time(fps=fps, frames=selected_frame_id) + for sub in subs: + if sub.start < cur_time and sub.end > cur_time: + sub_text = sub.text.replace("\\N", " ") + if sub_time: + start_time = milliseconds_to_seconds(sub.start) + end_time = milliseconds_to_seconds(sub.end) + sub_text = f"[{start_time}, {end_time}] {sub_text}" + if sub_text.strip() and sub_text not in subtitles: + subtitles.append(sub_text) + + if subtitles: + subtitles_str = '\n'.join(subtitles) + return f"The subtitles of the video are as follows:\n\n{subtitles_str}\n\n" + else: + return "" + + def prepare_dataset(self, dataset_name="CG-Bench_MCQ_Grounding_Mini", repo_id="CG-Bench/CG-Bench"): + + def check_integrity(pth): + data_file = osp.join(pth, f"{dataset_name}.tsv") + + if not os.path.exists(data_file): + return False + + if md5(data_file) != self.MD5: + 
return False + data = load(data_file) + for video_pth in data["video"]: + if not osp.exists(osp.join(pth, video_pth)): + return False + + return True + + cache_path = get_cache_path(repo_id) + + if cache_path is not None and check_integrity(cache_path): + dataset_path = cache_path + else: + + def generate_tsv(pth): + + tsv_file = osp.join(pth, f"{dataset_name}.tsv") + + task_modes = ["long_acc", "clue_acc", "miou"] + all_data = [] + for task_mode in task_modes: + with open(osp.join(pth, "cgbench_mini.json"), "r") as f: + data_file = pd.DataFrame(json.load(f)) + + data_file = data_file.assign(index=range(len(data_file))) + data_file["video"] = data_file["video_uid"].apply(lambda x: f"cg_videos_720p/{x}.mp4") + data_file["subtitle_path"] = data_file["video_uid"].apply( + lambda x: ( + f"cg_subtitles/{x}.srt" + if osp.exists(osp.join(dataset_path, f"cg_subtitles/{x}.srt")) + else "" + ) + ) + + data_file["clue_video_path"] = "" + + if task_mode in ["clue_acc"]: + data_file["clue_video_path"] = data_file["clue_video_path"] = data_file.apply( + lambda row: f"cg_clue_videos/{row['qid']}.mp4", axis=1 + ) + + data_file["task_mode"] = task_mode + + if task_mode in ["clue_acc", "long_acc"]: + data_file["answer"] = data_file["right_answer"] + + if task_mode == "miou": + data_file["answer"] = data_file["clue_intervals"] + + if task_mode in ["long_acc", "miou"]: + data_file["clue_intervals"] = "" + + data_file = data_file[ + [ + "index", + "video_uid", + "video", + "duration", + "domain", + "choices", + "sub_category", + "subtitle_path", + "question", + "answer", + "task_mode", + "clue_intervals", + "qid", + "clue_video_path", + ] + ] + + all_data.append(data_file) + + final_data = pd.concat(all_data, ignore_index=True) + final_data["index"] = range(len(final_data)) + final_data.to_csv(tsv_file, sep="\t", index=False) + + if modelscope_flag_set(): + from modelscope import dataset_snapshot_download + + dataset_path = dataset_snapshot_download(dataset_id=repo_id) + else: + dataset_path = snapshot_download(repo_id=repo_id, repo_type="dataset") + + unzip_hf_zip(dataset_path) + generate_tsv(dataset_path) + + tsv_file = osp.join(dataset_path, f"{dataset_name}.tsv") + + return dict(data_file=tsv_file, root=dataset_path) + + def build_prompt(self, line, video_llm): + + if isinstance(line, int): + assert line < len(self) + line = self.data.iloc[line] + + task_mode = line["task_mode"] + + message = [] + + origin_use_subtitle_time = self.use_subtitle_time + + try: + if task_mode in ["long_acc", "clue_acc"]: + system_prompt = self.SYS[task_mode] + elif task_mode == "miou": + if self.use_frame_time and not video_llm: + system_prompt = self.SYS[task_mode] + else: + system_prompt = self.SYS["miou_wo_frame_time"] + if self.use_subtitle_time is True: + self.use_subtitle_time = False + + user_prompt = "" + + if task_mode in ["long_acc", "miou"]: + video_path = line["video"] + + if video_llm: + message.append(dict(type="video", value=osp.join(self.data_root, video_path))) + + if self.use_subtitle and line["subtitle_path"] and not pd.isna(line["subtitle_path"]): + if self.nframe: + image_paths, frame_indices, vid_fps = self.save_video_frames( + video_path, uid=line["video_uid"], num_frames=self.nframe, fps=self.fps + ) + user_prompt += self.get_subtitles(line["subtitle_path"], frame_indices=frame_indices, + fps=vid_fps, sub_time=self.use_subtitle_time) + else: + user_prompt += self.get_subtitles(line["subtitle_path"], sub_time=self.use_subtitle_time) + else: + image_paths, frame_indices, vid_fps = self.save_video_frames( 
+ video_path, uid=line["video_uid"], num_frames=self.nframe, fps=self.fps + ) + message.extend(dict(type="image", value=im) for im in image_paths) + + if self.use_frame_time: + user_prompt += get_timestampes(frame_indices, vid_fps) + + if self.use_subtitle and line["subtitle_path"] and not pd.isna(line["subtitle_path"]): + user_prompt += self.get_subtitles( + line["subtitle_path"], frame_indices=frame_indices, fps=vid_fps, + sub_time=self.use_subtitle_time + ) + + elif task_mode == "clue_acc": + clue_video_path = line["clue_video_path"] + video_path = line["video"] + + if video_llm: + message.append(dict(type="video", value=osp.join(self.data_root, clue_video_path))) + print(message) + + if self.use_subtitle and line["subtitle_path"] and not pd.isna(line["subtitle_path"]): + if self.nframe: + image_paths, frame_indices, vid_fps = self.save_video_frames( + video_path, uid=line["video_uid"], num_frames=self.nframe, fps=self.fps + ) + user_prompt += self.get_subtitles(line["subtitle_path"], frame_indices=frame_indices, + fps=vid_fps, sub_time=self.use_subtitle_time) + else: + user_prompt += self.get_subtitles(line["subtitle_path"], sub_time=self.use_subtitle_time) + else: + if self.nframe > 32: + self.nframe = 32 + print("The maximum number of frames is 32 when evaluating clue-based mcq in CG-Bench !") + + clue_intervals = eval(line["clue_intervals"]) + + image_paths, frame_indices, vid_fps = self.save_video_frames( + video_path, uid=line["qid"], clue_intervals=clue_intervals, num_frames=self.nframe, fps=self.fps + ) + + message.extend(dict(type="image", value=im) for im in image_paths) + + if self.use_frame_time: + user_prompt += get_timestampes(frame_indices, vid_fps) + + if self.use_subtitle and line["subtitle_path"] and not pd.isna(line["subtitle_path"]): + user_prompt += self.get_subtitles( + line["subtitle_path"], frame_indices=frame_indices, fps=vid_fps, + sub_time=self.use_subtitle_time + ) + + question = line["question"] + user_prompt += f"Question: {question}\n\n" + + choices = eval(line["choices"]) + labels = [chr(ord("A") + i) for i in range(len(choices))] + user_prompt += "\n".join([f"{label}:{value}" for label, value in zip(labels, choices)]) + "\n\n" + + message.append(dict(type="text", value=system_prompt + user_prompt)) + + return message + + finally: + # Ensure that `use_subtitle_time` is always restored to its original value + self.use_subtitle_time = origin_use_subtitle_time + + def save_video_frames(self, video, uid, clue_intervals=None, num_frames=8, fps=-1): + + if type(uid) is not str: + uid = str(uid) + + vid_path = osp.join(self.data_root, video) + vid = decord.VideoReader(vid_path) + vid_fps = vid.get_avg_fps() + n_frames = len(vid) + + if clue_intervals is not None: + merged_intervals = merge_intervals(clue_intervals) + + if num_frames > 0 and fps < 0: + indices = sample_frames_clue_average(merged_intervals, num_frames, vid_fps) + frame_paths = self.clue_frame_paths(uid, len(indices)) + + elif fps > 0: + frame_indices = [] + for start, end in merged_intervals: + start_frame = int(start * vid_fps) + end_frame = int(end * vid_fps) + step = vid_fps / fps + interval_indices = [ + int(start_frame + i * step) for i in range(int((end_frame - start_frame) / step)) + ] + frame_indices.extend(interval_indices) + + if len(frame_indices) < 32: + indices = sample_frames_clue_average(merged_intervals, 32, vid_fps) + else: + indices = frame_indices + frame_paths = self.clue_frame_paths_fps(uid, len(indices), fps) + + else: + if num_frames > 0 and fps < 0: + step_size = len(vid) / 
(num_frames + 1) + indices = [int(i * step_size) for i in range(1, num_frames + 1)] + + frame_paths = self.frame_paths(uid) + elif fps > 0: + total_duration = n_frames / vid_fps + required_frames = int(total_duration * fps) + step_size = vid_fps / fps + indices = [int(i * step_size) for i in range(required_frames)] + frame_paths = self.frame_paths_fps(uid, len(indices)) + + # Save and validate frames + valid_paths = [] + valid_indices = [] + + if not np.all([osp.exists(p) for p in frame_paths]): + images = [vid[i].asnumpy() for i in indices] + for i, (img_array, path) in enumerate(zip(images, frame_paths)): + if osp.exists(path): + try: + with Image.open(path) as img: + img.verify() + valid_paths.append(path) + valid_indices.append(indices[i]) + except Exception: + continue + else: + try: + img = Image.fromarray(img_array) + img.save(path) + img.verify() + valid_paths.append(path) + valid_indices.append(indices[i]) + except Exception: + continue + else: + for i, path in enumerate(frame_paths): + try: + with Image.open(path) as img: + img.verify() + valid_paths.append(path) + valid_indices.append(indices[i]) + except Exception: + continue + + return valid_paths, valid_indices, vid_fps + + def evaluate(self, eval_file, **judge_kwargs): + + assert eval_file.endswith(".xlsx"), "data file should be an xlsx file" + + tgt_file = eval_file.replace(".xlsx", "_rating.json") + score_file = eval_file.replace(".xlsx", "_score.xlsx") + + data = load(eval_file) + + data_un = data[~pd.isna(data["prediction"])] + data_pred_na = data[pd.isna(data["prediction"])] + + data_pred_na["score"] = -1 + + data_un["score"] = data_un.apply( + lambda row: post_process( + response=row["prediction"], + right_answer=row["answer"], + task_mode=row["task_mode"], + duration=row["duration"], + ), + axis=1, + ) + + data = pd.concat([data_pred_na, data_un]) + + rejected_count = (data["score"] == -1).sum() + + print( + f"Among {len(data)} questions, " + f"failed to obtain prediction for {len(data_pred_na)} questions, " + f"failed to obtain the score for {rejected_count - len(data_pred_na)} questions. " + f"Those questions will be counted as -1 score in ALL rating, and will not be counted in VALID rating." 
+ ) + + dump(data, score_file) + + rating = get_dimention_rating_mcq_grouding(score_file) + + dump(rating, tgt_file) + + return rating + + +# 评估时,step_2 评估时,给出 [prompt] + image_paths 就行 +class CGBench_OpenEnded_Mini(VideoBaseDataset): + + TYPE = "Video-OpenEnded" + + dataset = "CG-Bench_OpenEnded_Mini" + + MD5 = "9175791b11afdfa305fdb3e525b7a4ee" + + SYS = ( + "You will be provided with sampled frames from a video, along with a " + "question.\n" + "Your task is to analyze the provided frames and infer the most plausible " + "answer based on the visual information.\n" + "If the visual information is ambiguous or insufficient, use the available " + "context to reason your answer.\n" + "Only output the answer in the following format:\n\n" + '```json\n{"result": "answer"}\n```\n\n' + 'The "answer" can be a word, phrase, or sentence that directly responds to ' + "the question.\n\n" + ) + + def __init__( + self, + dataset="CG-Bench_OpenEnded_Mini", + use_subtitle=False, + use_subtitle_time=False, + use_frame_time=False, + nframe=0, + fps=-1, + ): + super().__init__(dataset=dataset, nframe=nframe, fps=fps) + self.use_subtitle = use_subtitle + self.use_subtitle_time = use_subtitle_time + self.use_frame_time = use_frame_time + self.dataset_name = dataset + lmu_root = LMUDataRoot() + self.clue_frame_root = osp.join(lmu_root, "clue_images", dataset) + + @classmethod + def supported_datasets(cls): + return ["CG-Bench_OpenEnded_Mini"] + + def get_subtitles(self, subtitle_path, frame_indices=None, fps=None, sub_time=False): + + subtitles = [] + + srt_path = osp.join(self.data_root, subtitle_path) + assert osp.exists(srt_path) + import pysubs2 + + subs = pysubs2.load(srt_path, encoding="utf-8") + if not frame_indices: + for sub in subs: + sub_text = sub.text.replace("\\N", " ") + if sub_time: + start_time = milliseconds_to_seconds(sub.start) + end_time = milliseconds_to_seconds(sub.end) + sub_text = f"[{start_time}, {end_time}] {sub_text}" + if sub_text.strip() and sub_text not in subtitles: + subtitles.append(sub_text) + else: + for selected_frame_id in frame_indices: + cur_time = pysubs2.make_time(fps=fps, frames=selected_frame_id) + for sub in subs: + if sub.start < cur_time and sub.end > cur_time: + sub_text = sub.text.replace("\\N", " ") + if sub_time: + start_time = milliseconds_to_seconds(sub.start) + end_time = milliseconds_to_seconds(sub.end) + sub_text = f"[{start_time}, {end_time}] {sub_text}" + if sub_text.strip() and sub_text not in subtitles: + subtitles.append(sub_text) + + if subtitles: + subtitles_str = '\n'.join(subtitles) + return f"The subtitles of the video are as follows:\n\n{subtitles_str}\n\n" + else: + return "" + + def prepare_dataset(self, dataset_name="CG-Bench_OpenEnded_Mini", repo_id="CG-Bench/CG-Bench"): + + def check_integrity(pth): + data_file = osp.join(pth, f"{dataset_name}.tsv") + + if not os.path.exists(data_file): + return False + + if md5(data_file) != self.MD5: + return False + data = load(data_file) + for video_pth in data["video"]: + if not osp.exists(osp.join(pth, video_pth)): + return False + + return True + + cache_path = get_cache_path(repo_id) + + if cache_path is not None and check_integrity(cache_path): + dataset_path = cache_path + else: + + def generate_tsv(pth): + + tsv_file = osp.join(pth, f"{dataset_name}.tsv") + + with open(osp.join(pth, "cgbench_mini.json"), "r") as f: + data_file = pd.DataFrame(json.load(f)) + + data_file = data_file.assign(index=range(len(data_file))) + data_file["video"] = data_file["video_uid"].apply(lambda x: 
f"cg_videos_720p/{x}.mp4") + data_file["subtitle_path"] = data_file["video_uid"].apply( + lambda x: f"cg_subtitles/{x}.srt" if osp.exists(osp.join(pth, f"cg_subtitles/{x}.srt")) else "" + ) + + data_file = data_file[ + [ + "index", + "video_uid", + "video", + "duration", + "domain", + "sub_category", + "subtitle_path", + "question", + "answer", + "clue_intervals", + "qid", + ] + ] + + data_file.to_csv(tsv_file, sep="\t", index=False) + + if modelscope_flag_set(): + from modelscope import dataset_snapshot_download + + dataset_path = dataset_snapshot_download(dataset_id=repo_id) + else: + dataset_path = snapshot_download(repo_id=repo_id, repo_type="dataset") + + unzip_hf_zip(dataset_path) + generate_tsv(dataset_path) + + tsv_file = osp.join(dataset_path, f"{dataset_name}.tsv") + + return dict(data_file=tsv_file, root=dataset_path) + + def build_prompt(self, line, video_llm): + + if isinstance(line, int): + assert line < len(self) + line = self.data.iloc[line] + + message = [] + + sys_prompt = self.SYS + + user_prompt = "" + + video_path = line["video"] + + if video_llm: + message.append(dict(type="video", value=osp.join(self.data_root, video_path))) + if self.use_subtitle and line["subtitle_path"] and not pd.isna(line["subtitle_path"]): + if self.nframe: + image_paths, frame_indices, vid_fps = self.save_video_frames( + video_path, uid=line["video_uid"], num_frames=self.nframe, fps=self.fps + ) + user_prompt += self.get_subtitles(line["subtitle_path"], frame_indices=frame_indices, + fps=vid_fps, sub_time=self.use_subtitle_time) + else: + user_prompt += self.get_subtitles(line["subtitle_path"], sub_time=self.use_subtitle_time) + else: + image_paths, frame_indices, vid_fps = self.save_video_frames( + video_path, uid=line["video_uid"], num_frames=self.nframe, fps=self.fps + ) + message.extend(dict(type="image", value=im) for im in image_paths) + + if self.use_frame_time: + user_prompt += get_timestampes(frame_indices, vid_fps) + + if self.use_subtitle and line["subtitle_path"] and not pd.isna(line["subtitle_path"]): + user_prompt += self.get_subtitles( + line["subtitle_path"], frame_indices=frame_indices, fps=vid_fps, + sub_time=self.use_subtitle_time + ) + + question = line["question"] + user_prompt += f"Question: {question}\n\n" + + message.append(dict(type="text", value=sys_prompt + user_prompt)) + + return message + + def clue_frame_paths(self, qid, num_frames=8): + frame_root = osp.join(self.clue_frame_root, qid) + os.makedirs(frame_root, exist_ok=True) + return [osp.join(frame_root, self.frame_tmpl.format(i, num_frames)) for i in range(1, num_frames + 1)] + + def save_video_frames(self, video, uid, clue_intervals=None, num_frames=8, fps=-1): + + if type(uid) is not str: + uid = str(uid) + + vid_path = osp.join(self.data_root, video) + vid = decord.VideoReader(vid_path) + vid_fps = vid.get_avg_fps() + n_frames = len(vid) + + if clue_intervals is not None: + merged_intervals = merge_intervals(clue_intervals) + + if num_frames > 0 and fps < 0: + indices = sample_frames_clue_average(merged_intervals, num_frames, vid_fps) + frame_paths = self.clue_frame_paths(uid, len(indices)) + + elif fps > 0: + frame_indices = [] + for start, end in merged_intervals: + start_frame = int(start * vid_fps) + end_frame = int(end * vid_fps) + step = vid_fps / fps + interval_indices = [ + int(start_frame + i * step) for i in range(int((end_frame - start_frame) / step)) + ] + frame_indices.extend(interval_indices) + + if len(frame_indices) < 32: + indices = sample_frames_clue_average(merged_intervals, 32, vid_fps) 
+ else: + indices = frame_indices + frame_paths = self.clue_frame_paths_fps(uid, len(indices), fps) + + else: + if num_frames > 0 and fps < 0: + step_size = len(vid) / (num_frames + 1) + indices = [int(i * step_size) for i in range(1, num_frames + 1)] + frame_paths = self.frame_paths(uid) + elif fps > 0: + total_duration = n_frames / vid_fps + required_frames = int(total_duration * fps) + step_size = vid_fps / fps + indices = [int(i * step_size) for i in range(required_frames)] + frame_paths = self.frame_paths_fps(uid, len(indices)) + + valid_paths = [] + valid_indices = [] + + if not np.all([osp.exists(p) for p in frame_paths]): + images = [vid[i].asnumpy() for i in indices] + for i, (img_array, path) in enumerate(zip(images, frame_paths)): + if osp.exists(path): + try: + with Image.open(path) as img: + img.verify() + valid_paths.append(path) + valid_indices.append(indices[i]) + except Exception: + continue + else: + try: + img = Image.fromarray(img_array) + img.save(path) + img.verify() + valid_paths.append(path) + valid_indices.append(indices[i]) + except Exception: + continue + else: + for i, path in enumerate(frame_paths): + try: + with Image.open(path) as img: + img.verify() + valid_paths.append(path) + valid_indices.append(indices[i]) + except Exception: + continue + + return valid_paths, valid_indices, vid_fps + + def evaluate(self, eval_file, **judge_kwargs): + + from .utils.cgbench import get_dimention_rating_open_ended, post_process_open + + assert eval_file.endswith(".xlsx"), "data file should be an xlsx file" + + tgt_file = eval_file.replace(".xlsx", "_rating.json") + score_file = eval_file.replace(".xlsx", "_score.xlsx") + step_1_tmp_file = eval_file.replace(".xlsx", "_step_1.pkl") + step_2_tmp_file = eval_file.replace(".xlsx", "_step_2.pkl") + + data = load(eval_file) + + data_pred_no_na = data[~pd.isna(data["prediction"])] + data_pred_na = data[pd.isna(data["prediction"])] + + data_pred_na["model_result"] = -1 + data_pred_na["step_1_result"] = -1 + data_pred_na["step_2_result"] = -1 + data_pred_na["score"] = -1 + + data_pred_no_na["model_result"] = data_pred_no_na.apply( + lambda row: post_process_open( + response=row["prediction"], + ), + axis=1, + ) + + data_no_model_result = data_pred_no_na[data_pred_no_na["model_result"] == -1] + data_step_1 = data_pred_no_na[data_pred_no_na["model_result"] != -1] + + if judge_kwargs.get("model", None) != "gpt-4o-0806": + judge_kwargs["model"] = "gpt-4o-0806" + print("The judge model in cg-bench is gpt-4o-0806!") + + model_step_1 = build_judge(system_prompt=sys_prompt_open_eval_step_1, **judge_kwargs) + nproc = judge_kwargs.pop("nproc", 32) + + lines_step_1 = data_step_1.to_dict("records") + tups_step_1 = [(model_step_1, line) for line in lines_step_1] + + keys_step_1 = {line["qid"] for line in lines_step_1} + + ans = {} + if osp.exists(step_1_tmp_file): + ans = load(step_1_tmp_file) + tups_step_1 = [x for x, i in zip(tups_step_1, keys_step_1) if i not in ans] + keys_step_1 = [i for i in keys_step_1 if i not in ans] + + _ = track_progress_rich( + eval_open_first, + tups_step_1, + nproc=nproc, + keys=keys_step_1, + save=step_1_tmp_file, + ) + + step_1_results = load(step_1_tmp_file) + data_step_1 = save_step_1_steps(data_step_1, step_1_results) # -1, 0, 1, 2 + + data_no_step_1_results = data_step_1[data_step_1["step_1_result"] == -1] + data_step_1_over = data_step_1[data_step_1["step_1_result"].isin([0, 1])] + data_step_2 = data_step_1[data_step_1["step_1_result"] == 2] + + print(judge_kwargs) + + model_step_2 = 
build_judge(system_prompt=sys_prompt_open_eval_step_2, **judge_kwargs) + + lines_step_2 = data_step_2.to_dict("records") + + tups_step_2 = [] + + for line in tqdm(lines_step_2): + clue_intervals = eval(line["clue_intervals"]) + lmu_root = LMUDataRoot() + clue_frame_root = osp.join(lmu_root, "clue_images", self.dataset) + data_root = self.data_root + frame_paths, _, _ = save_clue_video_frames( + data_root, + clue_frame_root, + video=line["video"], + uid=line["qid"], + clue_intervals=clue_intervals, + num_frames=32, + ) + tups_step_2.append((model_step_2, line, frame_paths)) + + keys_step_2 = {line["qid"] for line in lines_step_2} + + ans = {} + if osp.exists(step_2_tmp_file): + ans = load(step_2_tmp_file) + tups_step_2 = [x for x, i in zip(tups_step_2, keys_step_2) if i not in ans] + keys_step_2 = [i for i in keys_step_2 if i not in ans] + + _ = track_progress_rich( + eval_open_second, + tups_step_2, + nproc=nproc, + keys=keys_step_2, + save=step_2_tmp_file, + ) + + step_2_results = load(step_2_tmp_file) + data_step_2 = save_step_2_steps(data_step_2, step_2_results) + + data_no_step_2_results = data_step_2[data_step_2["score"] == -1] + data_step_2_over = data_step_2[data_step_2["score"].isin([0, 1])] + + data = pd.concat( + [ + data_pred_na, + data_no_model_result, + data_no_step_1_results, + data_step_1_over, + data_no_step_2_results, + data_step_2_over, + ] + ) + + dump(data, score_file) + + rating = get_dimention_rating_open_ended(score_file) + + dump(rating, tgt_file) + + return rating + + +class CGBench_MCQ_Grounding(VideoBaseDataset): + + TYPE = "Video-MCQ-Grounding" + + MD5 = "eaead3d978a689269fefce4ae29c86df" + + SYS = { + "long_acc": ( + "You will be provided with sampled frames from a video, along with a " + "multiple-choice question that includes a question and several answer options.\n" + "Your task is to analyze the provided frames, infer the most plausible " + "answer based on the visual information.\n" + "If the video does not provide enough information, infer the answer based " + "on the options available and still provide a result. " + "Therefore, In all cases, an answer must be given.\n" + "Only output the answer in the following format:\n\n" + '```json\n{"result": "option"}\n```\n\n' + 'The "option" is the uppercase letter corresponding to your answer.\n\n' + ), + "clue_acc": ( + "You will be provided with sampled frames from a video, along with a " + "multiple-choice question that includes a question and several answer options.\n" + "Your task is to analyze the provided frames, infer the most plausible " + "answer based on the visual information.\n" + "If the video does not provide enough information, infer the answer based " + "on the options available and still provide a result. 
" + "Therefore, In all cases, an answer must be given.\n" + "Only output the answer in the following format:\n\n" + '```json\n{"result": "option"}\n```\n\n' + "The 'option' is the uppercase letter corresponding to your answer.\n\n" + ), + "miou": ( + "You will be provided with uniformly sampled frames from a video and their " + "timestamps, along with a multiple-choice question that includes a question " + "and several answer options.\n" + "Your task is to determine in which intervals the 'clue intervals' exist " + "that contain visual information needed to answer the question.\n" + "Only output the answer in the following format:\n\n" + '```json\n{"result": [[start1, end1], [start2, end2], ...]}\n```\n\n' + "In this output format, each 'start' and 'end' represents the beginning and " + "end of an interval in seconds where relevant clues can be found.\n" + "You must provide at least one interval and at most five intervals. " + "Intervals exceeding five will NOT be considered valid.\n" + ), + "miou_wo_frame_time": ( + "You will be provided with uniformly sampled frames from a video, along " + "with a multiple-choice question that includes a question and several " + "answer options.\n" + "Your task is to determine in which intervals the 'clue intervals' exist " + "that contain visual information needed to answer the question.\n" + "Only output the answer in the following format:\n\n" + '```json\n{"result": [[start1, end1], [start2, end2], ...]}\n```\n\n' + 'In this output format, each "start" and "end" represents the start and ' + "end of the video where the relevant clue can be found in the form of a " + "floating point number between 0 and 1, where 0 represents the start time " + "of the video and 1 represents the end time of the video.\n" + "You must provide at least one interval and at most five intervals. 
" + "Intervals exceeding five will NOT be considered valid.\n" + ), + } + + def __init__( + self, + dataset="CG-Bench_MCQ_Grounding", + use_subtitle=False, + use_subtitle_time=False, + use_frame_time=False, + nframe=0, + fps=-1, + ): + super().__init__(dataset=dataset, nframe=nframe, fps=fps) + self.use_subtitle = use_subtitle + self.use_subtitle_time = use_subtitle_time + self.use_frame_time = use_frame_time + self.dataset_name = dataset + lmu_root = LMUDataRoot() + self.clue_frame_root = osp.join(lmu_root, "clue_images", dataset) + + @classmethod + def supported_datasets(cls): + return ["CG-Bench_MCQ_Grounding"] + + def clue_frame_paths(self, qid, num_frames=8): + frame_root = osp.join(self.clue_frame_root, qid) + os.makedirs(frame_root, exist_ok=True) + return [osp.join(frame_root, self.frame_tmpl.format(i, num_frames)) for i in range(1, num_frames + 1)] + + def clue_frame_paths_fps(self, qid, num_frames=8, fps=-1): + frame_root = osp.join(self.clue_frame_root, qid) + os.makedirs(frame_root, exist_ok=True) + return [osp.join(frame_root, self.frame_tmpl_fps.format(i, num_frames, fps)) for i in range(1, num_frames + 1)] + + def get_subtitles(self, subtitle_path, frame_indices=None, fps=None, sub_time=False): + + subtitles = [] + + srt_path = osp.join(self.data_root, subtitle_path) + assert osp.exists(srt_path) + import pysubs2 + + subs = pysubs2.load(srt_path, encoding="utf-8") + if not frame_indices: + for sub in subs: + sub_text = sub.text.replace("\\N", " ") + if sub_time: + start_time = milliseconds_to_seconds(sub.start) + end_time = milliseconds_to_seconds(sub.end) + sub_text = f"[{start_time}, {end_time}] {sub_text}" + if sub_text.strip() and sub_text not in subtitles: + subtitles.append(sub_text) + else: + for selected_frame_id in frame_indices: + cur_time = pysubs2.make_time(fps=fps, frames=selected_frame_id) + for sub in subs: + if sub.start < cur_time and sub.end > cur_time: + sub_text = sub.text.replace("\\N", " ") + if sub_time: + start_time = milliseconds_to_seconds(sub.start) + end_time = milliseconds_to_seconds(sub.end) + sub_text = f"[{start_time}, {end_time}] {sub_text}" + if sub_text.strip() and sub_text not in subtitles: + subtitles.append(sub_text) + + if subtitles: + subtitles_str = '\n'.join(subtitles) + return f"The subtitles of the video are as follows:\n\n{subtitles_str}\n\n" + else: + return "" + + def prepare_dataset(self, dataset_name="CG-Bench_MCQ_Grounding", repo_id="CG-Bench/CG-Bench"): + + def check_integrity(pth): + data_file = osp.join(pth, f"{dataset_name}.tsv") + + if not os.path.exists(data_file): + return False + + if md5(data_file) != self.MD5: + return False + data = load(data_file) + for video_pth in data["video"]: + if not osp.exists(osp.join(pth, video_pth)): + return False + + for clue_video_pth in data["clue_video_path"]: + if clue_video_pth and not (isinstance(clue_video_pth, float) and np.isnan(clue_video_pth)): + if not osp.exists(osp.join(pth, clue_video_pth)): + return False + + return True + + cache_path = get_cache_path(repo_id) + + if cache_path is not None and check_integrity(cache_path): + dataset_path = cache_path + else: + + def generate_tsv(pth): + + tsv_file = osp.join(pth, f"{dataset_name}.tsv") + + task_modes = ["long_acc", "clue_acc", "miou"] + all_data = [] + for task_mode in task_modes: + with open(osp.join(pth, "cgbench.json"), "r") as f: + data_file = pd.DataFrame(json.load(f)) + + data_file = data_file.assign(index=range(len(data_file))) + data_file["video"] = data_file["video_uid"].apply(lambda x: 
f"cg_videos_720p/{x}.mp4") + data_file["subtitle_path"] = data_file["video_uid"].apply( + lambda x: ( + f"cg_subtitles/{x}.srt" + if osp.exists(osp.join(dataset_path, f"cg_subtitles/{x}.srt")) + else "" + ) + ) + + data_file["clue_video_path"] = "" + + if task_mode in ["clue_acc"]: + data_file["clue_video_path"] = data_file["clue_video_path"] = data_file.apply( + lambda row: f"cg_clue_videos/{row['qid']}.mp4", axis=1 + ) + + data_file["task_mode"] = task_mode + + if task_mode in ["clue_acc", "long_acc"]: + data_file["answer"] = data_file["right_answer"] + + if task_mode == "miou": + data_file["answer"] = data_file["clue_intervals"] + + if task_mode in ["long_acc", "miou"]: + data_file["clue_intervals"] = "" + + data_file = data_file[ + [ + "index", + "video_uid", + "video", + "duration", + "domain", + "choices", + "sub_category", + "subtitle_path", + "question", + "answer", + "task_mode", + "clue_intervals", + "qid", + "clue_video_path", + ] + ] + + all_data.append(data_file) + + final_data = pd.concat(all_data, ignore_index=True) + final_data["index"] = range(len(final_data)) + final_data.to_csv(tsv_file, sep="\t", index=False) + + if modelscope_flag_set(): + from modelscope import dataset_snapshot_download + + dataset_path = dataset_snapshot_download(dataset_id=repo_id) + else: + dataset_path = snapshot_download(repo_id=repo_id, repo_type="dataset") + + unzip_hf_zip(dataset_path) + generate_tsv(dataset_path) + + tsv_file = osp.join(dataset_path, f"{dataset_name}.tsv") + + return dict(data_file=tsv_file, root=dataset_path) + + def build_prompt(self, line, video_llm): + + if isinstance(line, int): + assert line < len(self) + line = self.data.iloc[line] + + task_mode = line["task_mode"] + + message = [] + + origin_use_subtitle_time = self.use_subtitle_time + + try: + if task_mode in ["long_acc", "clue_acc"]: + system_prompt = self.SYS[task_mode] + elif task_mode == "miou": + if self.use_frame_time and not video_llm: + system_prompt = self.SYS[task_mode] + else: + system_prompt = self.SYS["miou_wo_frame_time"] + if self.use_subtitle_time is True: + self.use_subtitle_time = False + + user_prompt = "" + + if task_mode in ["long_acc", "miou"]: + video_path = line["video"] + + if video_llm: + message.append(dict(type="video", value=osp.join(self.data_root, video_path))) + + if self.use_subtitle and line["subtitle_path"] and not pd.isna(line["subtitle_path"]): + if self.nframe: + image_paths, frame_indices, vid_fps = self.save_video_frames( + video_path, uid=line["video_uid"], num_frames=self.nframe, fps=self.fps + ) + user_prompt += self.get_subtitles(line["subtitle_path"], frame_indices=frame_indices, + fps=vid_fps, sub_time=self.use_subtitle_time) + else: + user_prompt += self.get_subtitles(line["subtitle_path"], sub_time=self.use_subtitle_time) + else: + image_paths, frame_indices, vid_fps = self.save_video_frames( + video_path, uid=line["video_uid"], num_frames=self.nframe, fps=self.fps + ) + message.extend(dict(type="image", value=im) for im in image_paths) + + if self.use_frame_time: + user_prompt += get_timestampes(frame_indices, vid_fps) + + if self.use_subtitle and line["subtitle_path"] and not pd.isna(line["subtitle_path"]): + user_prompt += self.get_subtitles( + line["subtitle_path"], frame_indices=frame_indices, fps=vid_fps, + sub_time=self.use_subtitle_time + ) + + elif task_mode == "clue_acc": + clue_video_path = line["clue_video_path"] + video_path = line["video"] + + if video_llm: + message.append(dict(type="video", value=osp.join(self.data_root, clue_video_path))) + 
print(message) + + if self.use_subtitle and line["subtitle_path"] and not pd.isna(line["subtitle_path"]): + if self.nframe: + image_paths, frame_indices, vid_fps = self.save_video_frames( + video_path, uid=line["video_uid"], num_frames=self.nframe, fps=self.fps + ) + user_prompt += self.get_subtitles(line["subtitle_path"], frame_indices=frame_indices, + fps=vid_fps, sub_time=self.use_subtitle_time) + else: + user_prompt += self.get_subtitles(line["subtitle_path"], sub_time=self.use_subtitle_time) + else: + if self.nframe > 32: + self.nframe = 32 + print("The maximum number of frames is 32 when evaluating clue-based mcq in CG-Bench !") + + clue_intervals = eval(line["clue_intervals"]) + + image_paths, frame_indices, vid_fps = self.save_video_frames( + video_path, uid=line["qid"], clue_intervals=clue_intervals, num_frames=self.nframe, fps=self.fps + ) + + message.extend(dict(type="image", value=im) for im in image_paths) + + if self.use_frame_time: + user_prompt += get_timestampes(frame_indices, vid_fps) + + if self.use_subtitle and line["subtitle_path"] and not pd.isna(line["subtitle_path"]): + user_prompt += self.get_subtitles( + line["subtitle_path"], frame_indices=frame_indices, fps=vid_fps, + sub_time=self.use_subtitle_time + ) + + question = line["question"] + user_prompt += f"Question: {question}\n\n" + + choices = eval(line["choices"]) + labels = [chr(ord("A") + i) for i in range(len(choices))] + user_prompt += "\n".join([f"{label}:{value}" for label, value in zip(labels, choices)]) + "\n\n" + + message.append(dict(type="text", value=system_prompt + user_prompt)) + + return message + + finally: + # Ensure that `use_subtitle_time` is always restored to its original value + self.use_subtitle_time = origin_use_subtitle_time + + def save_video_frames(self, video, uid, clue_intervals=None, num_frames=8, fps=-1): + + if type(uid) is not str: + uid = str(uid) + + vid_path = osp.join(self.data_root, video) + vid = decord.VideoReader(vid_path) + vid_fps = vid.get_avg_fps() + n_frames = len(vid) + + if clue_intervals is not None: + merged_intervals = merge_intervals(clue_intervals) + + if num_frames > 0 and fps < 0: + indices = sample_frames_clue_average(merged_intervals, num_frames, vid_fps) + frame_paths = self.clue_frame_paths(uid, len(indices)) + + elif fps > 0: + frame_indices = [] + for start, end in merged_intervals: + start_frame = int(start * vid_fps) + end_frame = int(end * vid_fps) + step = vid_fps / fps + interval_indices = [ + int(start_frame + i * step) for i in range(int((end_frame - start_frame) / step)) + ] + frame_indices.extend(interval_indices) + + if len(frame_indices) < 32: + indices = sample_frames_clue_average(merged_intervals, 32, vid_fps) + else: + indices = frame_indices + frame_paths = self.clue_frame_paths_fps(uid, len(indices), fps) + + else: + if num_frames > 0 and fps < 0: + step_size = len(vid) / (num_frames + 1) + indices = [int(i * step_size) for i in range(1, num_frames + 1)] + + frame_paths = self.frame_paths(uid) + elif fps > 0: + total_duration = n_frames / vid_fps + required_frames = int(total_duration * fps) + step_size = vid_fps / fps + indices = [int(i * step_size) for i in range(required_frames)] + frame_paths = self.frame_paths_fps(uid, len(indices)) + + # Save and validate frames + valid_paths = [] + valid_indices = [] + + if not np.all([osp.exists(p) for p in frame_paths]): + images = [vid[i].asnumpy() for i in indices] + for i, (img_array, path) in enumerate(zip(images, frame_paths)): + if osp.exists(path): + try: + with Image.open(path) as img: 
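+                            # Verify that an already-saved frame file is a readable image before reusing it; corrupted files are skipped below.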
+ img.verify() + valid_paths.append(path) + valid_indices.append(indices[i]) + except Exception: + continue + else: + try: + img = Image.fromarray(img_array) + img.save(path) + img.verify() + valid_paths.append(path) + valid_indices.append(indices[i]) + except Exception: + continue + else: + for i, path in enumerate(frame_paths): + try: + with Image.open(path) as img: + img.verify() + valid_paths.append(path) + valid_indices.append(indices[i]) + except Exception: + continue + + return valid_paths, valid_indices, vid_fps + + def evaluate(self, eval_file, **judge_kwargs): + + assert eval_file.endswith(".xlsx"), "data file should be an xlsx file" + + tgt_file = eval_file.replace(".xlsx", "_rating.json") + score_file = eval_file.replace(".xlsx", "_score.xlsx") + + data = load(eval_file) + + data_un = data[~pd.isna(data["prediction"])] + data_pred_na = data[pd.isna(data["prediction"])] + + data_pred_na["score"] = -1 + + data_un["score"] = data_un.apply( + lambda row: post_process( + response=row["prediction"], + right_answer=row["answer"], + task_mode=row["task_mode"], + duration=row["duration"], + ), + axis=1, + ) + + data = pd.concat([data_pred_na, data_un]) + + rejected_count = (data["score"] == -1).sum() + + print( + f"Among {len(data)} questions, " + f"failed to obtain prediction for {len(data_pred_na)} questions, " + f"failed to obtain the score for {rejected_count - len(data_pred_na)} questions. " + f"Those questions will be counted as -1 score in ALL rating, and will not be counted in VALID rating." + ) + + dump(data, score_file) + + rating = get_dimention_rating_mcq_grouding(score_file) + + dump(rating, tgt_file) + + return rating + + +# 评估时,step_2 评估时,给出 [prompt] + image_paths 就行 +class CGBench_OpenEnded(VideoBaseDataset): + + TYPE = "Video-OpenEnded" + + dataset = "CG-Bench_OpenEnded" + + MD5 = "796035eda0b1e916c517cdc1bc145cfc" + + SYS = ( + "You will be provided with sampled frames from a video, along with a " + "question.\n" + "Your task is to analyze the provided frames and infer the most plausible " + "answer based on the visual information.\n" + "If the visual information is ambiguous or insufficient, use the available " + "context to reason your answer.\n" + "Only output the answer in the following format:\n\n" + '```json\n{"result": "answer"}\n```\n\n' + 'The "answer" can be a word, phrase, or sentence that directly responds to ' + "the question.\n\n" + ) + + def __init__( + self, + dataset="CG-Bench_OpenEnded", + use_subtitle=False, + use_subtitle_time=False, + use_frame_time=False, + nframe=0, + fps=-1, + ): + super().__init__(dataset=dataset, nframe=nframe, fps=fps) + self.use_subtitle = use_subtitle + self.use_subtitle_time = use_subtitle_time + self.use_frame_time = use_frame_time + self.dataset_name = dataset + lmu_root = LMUDataRoot() + self.clue_frame_root = osp.join(lmu_root, "clue_images", dataset) + + @classmethod + def supported_datasets(cls): + return ["CG-Bench_OpenEnded"] + + def get_subtitles(self, subtitle_path, frame_indices=None, fps=None, sub_time=False): + + subtitles = [] + + srt_path = osp.join(self.data_root, subtitle_path) + assert osp.exists(srt_path) + import pysubs2 + + subs = pysubs2.load(srt_path, encoding="utf-8") + if not frame_indices: + for sub in subs: + sub_text = sub.text.replace("\\N", " ") + if sub_time: + start_time = milliseconds_to_seconds(sub.start) + end_time = milliseconds_to_seconds(sub.end) + sub_text = f"[{start_time}, {end_time}] {sub_text}" + if sub_text.strip() and sub_text not in subtitles: + subtitles.append(sub_text) + 
else: + for selected_frame_id in frame_indices: + cur_time = pysubs2.make_time(fps=fps, frames=selected_frame_id) + for sub in subs: + if sub.start < cur_time and sub.end > cur_time: + sub_text = sub.text.replace("\\N", " ") + if sub_time: + start_time = milliseconds_to_seconds(sub.start) + end_time = milliseconds_to_seconds(sub.end) + sub_text = f"[{start_time}, {end_time}] {sub_text}" + if sub_text.strip() and sub_text not in subtitles: + subtitles.append(sub_text) + + if subtitles: + subtitles_str = '\n'.join(subtitles) + return f"The subtitles of the video are as follows:\n\n{subtitles_str}\n\n" + else: + return "" + + def prepare_dataset(self, dataset_name="CG-Bench_OpenEnded", repo_id="CG-Bench/CG-Bench"): + + def check_integrity(pth): + data_file = osp.join(pth, f"{dataset_name}.tsv") + + if not os.path.exists(data_file): + return False + + if md5(data_file) != self.MD5: + return False + data = load(data_file) + for video_pth in data["video"]: + if not osp.exists(osp.join(pth, video_pth)): + return False + + return True + + cache_path = get_cache_path(repo_id) + + if cache_path is not None and check_integrity(cache_path): + dataset_path = cache_path + else: + + def generate_tsv(pth): + + tsv_file = osp.join(pth, f"{dataset_name}.tsv") + + with open(osp.join(pth, "cgbench.json"), "r") as f: + data_file = pd.DataFrame(json.load(f)) + + data_file = data_file.assign(index=range(len(data_file))) + data_file["video"] = data_file["video_uid"].apply(lambda x: f"cg_videos_720p/{x}.mp4") + data_file["subtitle_path"] = data_file["video_uid"].apply( + lambda x: f"cg_subtitles/{x}.srt" if osp.exists(osp.join(pth, f"cg_subtitles/{x}.srt")) else "" + ) + + data_file = data_file[ + [ + "index", + "video_uid", + "video", + "duration", + "domain", + "sub_category", + "subtitle_path", + "question", + "answer", + "clue_intervals", + "qid", + ] + ] + + data_file.to_csv(tsv_file, sep="\t", index=False) + + if modelscope_flag_set(): + from modelscope import dataset_snapshot_download + dataset_path = dataset_snapshot_download(dataset_id=repo_id) + else: + dataset_path = snapshot_download(repo_id=repo_id, repo_type="dataset") + + unzip_hf_zip(dataset_path) + generate_tsv(dataset_path) + + tsv_file = osp.join(dataset_path, f"{dataset_name}.tsv") + + return dict(data_file=tsv_file, root=dataset_path) + + def build_prompt(self, line, video_llm): + + if isinstance(line, int): + assert line < len(self) + line = self.data.iloc[line] + + message = [] + + sys_prompt = self.SYS + + user_prompt = "" + + video_path = line["video"] + + if video_llm: + message.append(dict(type="video", value=osp.join(self.data_root, video_path))) + if self.use_subtitle and line["subtitle_path"] and not pd.isna(line["subtitle_path"]): + if self.nframe: + image_paths, frame_indices, vid_fps = self.save_video_frames( + video_path, uid=line["video_uid"], num_frames=self.nframe, fps=self.fps + ) + user_prompt += self.get_subtitles(line["subtitle_path"], frame_indices=frame_indices, + fps=vid_fps, sub_time=self.use_subtitle_time) + else: + user_prompt += self.get_subtitles(line["subtitle_path"], sub_time=self.use_subtitle_time) + else: + image_paths, frame_indices, vid_fps = self.save_video_frames( + video_path, uid=line["video_uid"], num_frames=self.nframe, fps=self.fps + ) + message.extend(dict(type="image", value=im) for im in image_paths) + + if self.use_frame_time: + user_prompt += get_timestampes(frame_indices, vid_fps) + + if self.use_subtitle and line["subtitle_path"] and not pd.isna(line["subtitle_path"]): + user_prompt += 
self.get_subtitles( + line["subtitle_path"], frame_indices=frame_indices, fps=vid_fps, + sub_time=self.use_subtitle_time + ) + + question = line["question"] + user_prompt += f"Question: {question}\n\n" + + message.append(dict(type="text", value=sys_prompt + user_prompt)) + + return message + + def clue_frame_paths(self, qid, num_frames=8): + frame_root = osp.join(self.clue_frame_root, qid) + os.makedirs(frame_root, exist_ok=True) + return [osp.join(frame_root, self.frame_tmpl.format(i, num_frames)) for i in range(1, num_frames + 1)] + + def save_video_frames(self, video, uid, clue_intervals=None, num_frames=8, fps=-1): + + if type(uid) is not str: + uid = str(uid) + + vid_path = osp.join(self.data_root, video) + vid = decord.VideoReader(vid_path) + vid_fps = vid.get_avg_fps() + n_frames = len(vid) + + if clue_intervals is not None: + merged_intervals = merge_intervals(clue_intervals) + + if num_frames > 0 and fps < 0: + indices = sample_frames_clue_average(merged_intervals, num_frames, vid_fps) + frame_paths = self.clue_frame_paths(uid, len(indices)) + + elif fps > 0: + frame_indices = [] + for start, end in merged_intervals: + start_frame = int(start * vid_fps) + end_frame = int(end * vid_fps) + step = vid_fps / fps + interval_indices = [ + int(start_frame + i * step) for i in range(int((end_frame - start_frame) / step)) + ] + frame_indices.extend(interval_indices) + + if len(frame_indices) < 32: + indices = sample_frames_clue_average(merged_intervals, 32, vid_fps) + else: + indices = frame_indices + frame_paths = self.clue_frame_paths_fps(uid, len(indices), fps) + + else: + if num_frames > 0 and fps < 0: + step_size = len(vid) / (num_frames + 1) + indices = [int(i * step_size) for i in range(1, num_frames + 1)] + frame_paths = self.frame_paths(uid) + elif fps > 0: + total_duration = n_frames / vid_fps + required_frames = int(total_duration * fps) + step_size = vid_fps / fps + indices = [int(i * step_size) for i in range(required_frames)] + frame_paths = self.frame_paths_fps(uid, len(indices)) + + valid_paths = [] + valid_indices = [] + + if not np.all([osp.exists(p) for p in frame_paths]): + images = [vid[i].asnumpy() for i in indices] + for i, (img_array, path) in enumerate(zip(images, frame_paths)): + if osp.exists(path): + try: + with Image.open(path) as img: + img.verify() + valid_paths.append(path) + valid_indices.append(indices[i]) + except Exception: + continue + else: + try: + img = Image.fromarray(img_array) + img.save(path) + img.verify() + valid_paths.append(path) + valid_indices.append(indices[i]) + except Exception: + continue + else: + for i, path in enumerate(frame_paths): + try: + with Image.open(path) as img: + img.verify() + valid_paths.append(path) + valid_indices.append(indices[i]) + except Exception: + continue + + return valid_paths, valid_indices, vid_fps + + def evaluate(self, eval_file, **judge_kwargs): + + from .utils.cgbench import get_dimention_rating_open_ended, post_process_open + + assert eval_file.endswith(".xlsx"), "data file should be an xlsx file" + + tgt_file = eval_file.replace(".xlsx", "_rating.json") + score_file = eval_file.replace(".xlsx", "_score.xlsx") + step_1_tmp_file = eval_file.replace(".xlsx", "_step_1.pkl") + step_2_tmp_file = eval_file.replace(".xlsx", "_step_2.pkl") + + data = load(eval_file) + + data_pred_no_na = data[~pd.isna(data["prediction"])] + data_pred_na = data[pd.isna(data["prediction"])] + + data_pred_na["model_result"] = -1 + data_pred_na["step_1_result"] = -1 + data_pred_na["step_2_result"] = -1 + data_pred_na["score"] = -1 
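+        # Rows without a prediction keep -1 placeholders for every step and are skipped in the judge-based evaluation below.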
+ + data_pred_no_na["model_result"] = data_pred_no_na.apply( + lambda row: post_process_open( + response=row["prediction"], + ), + axis=1, + ) + + if judge_kwargs.get("model", None) != "gpt-4o-0806": + judge_kwargs["model"] = "gpt-4o-0806" + print("The judge model in cg-bench is gpt-4o-0806!") + + data_no_model_result = data_pred_no_na[data_pred_no_na["model_result"] == -1] + data_step_1 = data_pred_no_na[data_pred_no_na["model_result"] != -1] + + model_step_1 = build_judge(system_prompt=sys_prompt_open_eval_step_1, **judge_kwargs) + nproc = judge_kwargs.pop('nproc', 32) + + lines_step_1 = data_step_1.to_dict("records") + tups_step_1 = [(model_step_1, line) for line in lines_step_1] + + keys_step_1 = {line["qid"] for line in lines_step_1} + + ans = {} + if osp.exists(step_1_tmp_file): + ans = load(step_1_tmp_file) + tups_step_1 = [x for x, i in zip(tups_step_1, keys_step_1) if i not in ans] + keys_step_1 = [i for i in keys_step_1 if i not in ans] + + _ = track_progress_rich( + eval_open_first, + tups_step_1, + nproc=nproc, + keys=keys_step_1, + save=step_1_tmp_file, + ) + + step_1_results = load(step_1_tmp_file) + data_step_1 = save_step_1_steps(data_step_1, step_1_results) # -1, 0, 1, 2 + + data_no_step_1_results = data_step_1[data_step_1["step_1_result"] == -1] + data_step_1_over = data_step_1[data_step_1["step_1_result"].isin([0, 1])] + data_step_2 = data_step_1[data_step_1["step_1_result"] == 2] + + model_step_2 = build_judge(system_prompt=sys_prompt_open_eval_step_2, **judge_kwargs) + + lines_step_2 = data_step_2.to_dict("records") + + tups_step_2 = [] + + for line in tqdm(lines_step_2): + clue_intervals = eval(line["clue_intervals"]) + lmu_root = LMUDataRoot() + clue_frame_root = osp.join(lmu_root, "clue_images", self.dataset) + data_root = self.data_root + frame_paths, _, _ = save_clue_video_frames( + data_root, + clue_frame_root, + video=line["video"], + uid=line["qid"], + clue_intervals=clue_intervals, + num_frames=32, + ) + tups_step_2.append((model_step_2, line, frame_paths)) + + keys_step_2 = {line["qid"] for line in lines_step_2} + + ans = {} + if osp.exists(step_2_tmp_file): + ans = load(step_2_tmp_file) + tups_step_2 = [x for x, i in zip(tups_step_2, keys_step_2) if i not in ans] + keys_step_2 = [i for i in keys_step_2 if i not in ans] + + _ = track_progress_rich( + eval_open_second, + tups_step_2, + nproc=nproc, + keys=keys_step_2, + save=step_2_tmp_file, + ) + + step_2_results = load(step_2_tmp_file) + data_step_2 = save_step_2_steps(data_step_2, step_2_results) + + data_no_step_2_results = data_step_2[data_step_2["score"] == -1] + data_step_2_over = data_step_2[data_step_2["score"].isin([0, 1])] + + data = pd.concat( + [ + data_pred_na, + data_no_model_result, + data_no_step_1_results, + data_step_1_over, + data_no_step_2_results, + data_step_2_over, + ] + ) + + dump(data, score_file) + + rating = get_dimention_rating_open_ended(score_file) + + dump(rating, tgt_file) + + return rating diff --git a/vlmeval/dataset/cmmmu.py b/vlmeval/dataset/cmmmu.py new file mode 100644 index 0000000000000000000000000000000000000000..12c583f292103f32548621e39961e0282c275fbd --- /dev/null +++ b/vlmeval/dataset/cmmmu.py @@ -0,0 +1,354 @@ +from .image_base import ImageBaseDataset +import random +from collections import Counter +import os +import re +import tempfile +from ..smp import * + + +def get_multi_choice_prediction(response, all_choices, index2ans): + for char in [',', '.', '!', '?', ';', ':', "'"]: + response = response.strip(char) + response = " " + response + " " # add space to 
avoid partial match + + candidates = [] + + for choice in all_choices: # (A) (B) (C) (D) + # Add the choice to candidates each time it appears in the response + candidates.extend([choice for _ in range(response.count(f'({choice})'))]) + + if len(candidates) == 0: + for choice in all_choices: # A B C D + # Similarly, add the choice for each occurrence + candidates.extend([choice for _ in range(response.count(f'{choice}'))]) + + if len(candidates) == 0 and len(response.split()) >= 1: + for index, ans in index2ans.items(): + # Add index for each occurrence of ans in response + candidates.extend([index for _ in range(response.count(ans))]) + + # if all above doesn't get candidates, check if the content is larger than 5 tokens and try to parse the example + if len(candidates) == 0 and len(response.split()) >= 1: + for index, ans in index2ans.items(): + if ans in response: + candidates.append(index) + # index_ans = False # it's content ans. + + if len(candidates) == 0: # still not get answer, randomly choose one. + return random.choice(all_choices) + # return '' + else: + # Count the occurrence of each candidate + candidate_counts = Counter(candidates) + + # Select the most frequent candidates + max_count = max(candidate_counts.values()) + most_frequent_candidates = [c for c in all_choices if candidate_counts.get(c, 0) == max_count] + + # Combine the most frequent candidates in ABCD order + return ''.join(most_frequent_candidates) + + +def extract_numbers(string): + # Pattern for numbers with Chinese commas + pattern_commas = r'-?\d{1,3}(?:,\d{3})+' + # Pattern for scientific notation + pattern_scientific = r'-?\d+(?:\.\d+)?[eE][+-]?\d+' + # Pattern for simple numbers without Chinese commas + pattern_simple = r'-?(?:\d+\.\d+|\.\d+|\d+)(?![eE][+-]?\d+)(?!,\d)' + + # Extract numbers with Chinese commas + numbers_with_commas = re.findall(pattern_commas, string) + # Extract numbers in scientific notation + numbers_scientific = re.findall(pattern_scientific, string) + # Extract simple numbers without Chinese commas + numbers_simple = re.findall(pattern_simple, string) + + # Combine all extracted numbers + all_numbers = numbers_with_commas + numbers_scientific + numbers_simple + return all_numbers + + +def check_is_number(string): + try: + float(string.replace(',', '')) + return True + except ValueError: + # check if there's comma inside + return False + + +def count_letters(string): + return sum(c.isalpha() and 'a' <= c <= 'z' or 'A' <= c <= 'Z' for c in string) + + +def normalize_str(string, answer): + # check if characters in the string + + # if number, numerize it. 
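+    # Returns a one-element list with the normalized value, or an empty list when the string is too long to be a plausible answer.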
+ if string is None: + return [string] + string = string.strip() + + is_number = check_is_number(string) + + if is_number: + string = string.replace(',', '') + string = float(string) + # leave 2 decimal + string = round(string, 2) + return [string] + else: # it's likely to be a string + if len(string) > len(answer) + 20 or count_letters(string) > count_letters(answer) + 2: + return [] + return [string] + + +def get_fill_blank_prediction(response, answer): + """get the prediction from the generated response, + return a list of predicted strings or numbers""" + + def get_key_subresponses(response): + response = response.strip("。").strip() + sub_responses = re.split(r'。|\n', response) + indicators_of_keys = ['是', '为', '所以', '等于', '方案', '选择', + '正确答案', '因此', '最后', '答案', '结果'] + key_responses = [] + for index, resp in enumerate(sub_responses): + # if last one, accept it's an equation (the entire response can be just one sentence with equation) + if index == len(sub_responses) - 1: + indicators_of_keys.extend(['=']) + shortest_key_response = None + # the shortest response that may contain the answer (tail part of the response) + for indicator in indicators_of_keys: + if indicator in resp: + if not shortest_key_response: + shortest_key_response = resp.split(indicator)[-1].strip() + else: + if len(resp.split(indicator)[-1].strip()) < len(shortest_key_response): + shortest_key_response = resp.split(indicator)[-1].strip() + + if shortest_key_response: + # and it's not trivial + if shortest_key_response.strip() not in [":", ",", ".", "!", "?", ";", ":", "'"]: + key_responses.append(shortest_key_response) + if len(key_responses) == 0: # did not found any + return [response] + return key_responses + + key_responses = get_key_subresponses(response) + + pred_list = key_responses.copy() # keep the original string response + for resp in key_responses: + pred_list.extend(extract_numbers(resp)) + + tmp_pred_list = [] + for i in range(len(pred_list)): + tmp_pred_list.extend(normalize_str(pred_list[i], answer)) + pred_list = tmp_pred_list + + # remove duplicates + pred_list = list(set(pred_list)) + + return pred_list + + +def get_TF_prediction(response): + """get the prediction from the generated response, + return a list of predicted strings or numbers""" + + def get_key_subresponses(response): + response = response.strip("。").strip() + sub_responses = re.split(r'。|\n', response) + indicators_of_keys = ['是', '为', '所以', '判断', + '陈述', '说法', '表达', '答案', '结果'] + key_responses = [] + for index, resp in enumerate(sub_responses): + shortest_key_response = None + # the shortest response that may contain the answer (tail part of the response) + for indicator in indicators_of_keys: + if indicator in resp: + if not shortest_key_response: + shortest_key_response = resp.split(indicator)[-1].strip() + else: + if len(resp.split(indicator)[-1].strip()) < len(shortest_key_response): + shortest_key_response = resp.split(indicator)[-1].strip() + + if shortest_key_response: + # and it's not trivial + if shortest_key_response.strip() not in [":", ",", ".", "!", "?", ";", ":", "'"]: + key_responses.append(shortest_key_response) + if len(key_responses) == 0: # did not found any + return [response] + return key_responses + + key_responses = get_key_subresponses(response) + + pred_list = key_responses.copy() # keep the original string response + # remove duplicates + pred_list = list(set(pred_list)) + + return pred_list + + +class CMMMU(ImageBaseDataset): + TYPE = 'VQA' + + DATASET_URL = { + 'CMMMU_VAL': 
'https://opencompass.openxlab.space/utils/VLMEval/CMMMU_VAL.tsv' + } + + DATASET_MD5 = { + 'CMMMU_VAL': 'b4727e2fce2415bf646379e60c11a726' + } + + def dump_image(self, line): + os.makedirs(self.img_root, exist_ok=True) + + tgt_path_z = [] + if isinstance(line['image'], list): + for i in range(len(line['image'])): + tgt_path = osp.join(self.img_root, f"{line['index']}--{i + 1}.jpg") + if not read_ok(tgt_path): + decode_base64_to_image_file(line['image'][i], tgt_path) + tgt_path_z.append(tgt_path) + else: + tgt_path = osp.join(self.img_root, f"{line['index']}.jpg") + if not read_ok(tgt_path): + decode_base64_to_image_file(line['image'], tgt_path) + tgt_path_z.append(tgt_path) + return tgt_path_z + + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + + suffix = eval_file.split('.')[-1] + result_file = eval_file.replace(f'.{suffix}', '_acc.csv') + + if not osp.exists(result_file): + data = load(eval_file) + assert 'answer' in data and 'prediction' in data + data['prediction'] = [str(x) for x in data['prediction']] + data['answer'] = [str(x) for x in data['answer']] + + correct_count = 0 + correct_category = { + '技术与工程': [0, 0], + '科学': [0, 0], + '健康与医学': [0, 0], + '商业': [0, 0], + '艺术与设计': [0, 0], + '人文社会科学': [0, 0], + } + + for i in tqdm(data.iterrows()): + line = i[1] + correct_category[line['category']][0] += 1 + + # Options + if line['type'] == '选择': + index2ans = { + 'A': line['option1'], + 'B': line['option2'], + 'C': line['option3'], + 'D': line['option4'] + } + fact_option = get_multi_choice_prediction(line['prediction'], ['A', 'B', 'C', 'D'], index2ans) + if fact_option == line['answer']: + correct_count += 1 + correct_category[line['category']][1] += 1 + + # Binary + elif line['type'] == '判断': + positive_keywords = ['正确', '对', '准确', '肯定', '对的'] + negative_keywords = ['不对', '错误', '不正确', '不准确', '不合适', '否定', '错的', '错'] + ambiguous_keywords = ['对错', '是否正确', '否正确', '或者', '是否', '正确性', '对不'] + + def judge_similarity(pred_list, positive_keywords, negative_keywords): + positive_count = 0 + negative_count = 0 + + for pred in pred_list: + if any(pos_word in pred for pos_word in positive_keywords): + positive_count += 1 + elif any(neg_word in pred for neg_word in negative_keywords): + negative_count += 1 + + if positive_count > negative_count: + return "对" + elif negative_count > positive_count: + return "错" + else: + return random.choice(['对', '错']) + + answer = get_TF_prediction(line['prediction']) + answer = [word for word in answer if not any(ambiguous in word for ambiguous in ambiguous_keywords)] + fact_answer = judge_similarity(answer, positive_keywords, negative_keywords) + if fact_answer == line['answer']: + correct_count += 1 + correct_category[line['category']][1] += 1 + + # Fill the Blank + else: + norm_answers = normalize_str(line['answer'], line['answer']) + predicted_answer = get_fill_blank_prediction(line['prediction'], line['answer']) + + for pred in predicted_answer: + # already normalized + if isinstance(pred, str): # if it's a string, then find if ans in the pred_i + for norm_ans in norm_answers: + # only see if the string answer in the string pred + # print(norm_ans, pred) + if isinstance(norm_ans, str) and norm_ans in pred: + correct_count += 1 + correct_category[line['category']][1] += 1 + else: # it's a number + if pred in norm_answers: + correct_count += 1 + correct_category[line['category']][1] += 1 + + accuracyz = {} + accuracyz['总准确率'] = correct_count / len(data) + for i in correct_category.keys(): + accuracyz[i] = correct_category[i][1] / 
correct_category[i][0] + + accuracyz = d2df(accuracyz) + accuracyz.round(10) + dump(accuracyz, result_file) + + result = pd.read_csv(result_file) + return result + + def build_prompt(self, line): + if line['type'] == '选择': + tgt_path = self.dump_image(line) + question = line['question'] + options_prompt = 'Options:\n' + + for i in [['A', '1'], ['B', '2'], ['C', '3'], ['D', '4']]: + options_prompt += i[0] + '. ' + line['option' + i[1]] + '\n' + + prompt = (f'问题: {question}\n' + options_prompt + + '请回答上述多项选择题,并选出正确选项。这些题目可能包括单选和多选题型。如果所提供的信息不足以确定一个明确的答案,那么请根据可用的数据和你的判断来选择最可能正确的选项。') + + msgs = [] + if isinstance(tgt_path, list): + msgs.extend([dict(type='image', value=p) for p in tgt_path]) + else: + msgs = [dict(type='image', value=tgt_path)] + msgs.append(dict(type='text', value=prompt)) + + return msgs + + elif line['type'] == '判断': + msgs = super().build_prompt(line) + assert msgs[-1]['type'] == 'text' + msgs[-1]['value'] += '\n请回答上述判断题,并根据题目描述和所给的信息来判断问题中陈述的对错。如果信息不完整或不足以作出绝对判断,请运用你的逻辑推理和现有信息来做出最可能的判断。' + return msgs + + else: + msgs = super().build_prompt(line) + assert msgs[-1]['type'] == 'text' + msgs[-1]['value'] += '\n请回答上述填空题,并根据题目的要求和所提供的信息来给出最恰当的答案。如果信息不足以确切回答,那么请依据现有的数据和你的推理能力来填写最合理的答案。' + return msgs diff --git a/vlmeval/dataset/creation.py b/vlmeval/dataset/creation.py new file mode 100644 index 0000000000000000000000000000000000000000..8ee49c9622efc21afa45504513a07f0d751bbb6d --- /dev/null +++ b/vlmeval/dataset/creation.py @@ -0,0 +1,738 @@ +from .image_base import ImageBaseDataset +import numpy as np +import pandas as pd +from ..smp import * +from .utils import build_judge, DEBUG_MESSAGE +from ..utils import track_progress_rich +import re + +prompt_dict = {} +prompt_dict['LiveMMBench_Creation'] = { + # Subjective Judge [GPT-4o reference] + 'subjective':""" +Please act as an impartial judge and evaluate the quality of two responses provided by AI assistants to the user prompt. + +Your task is to carefully assess two responses based on provided instructions and evaluation criteria. After evaluating both responses, determine which response features better quality and better meets the criteria. If both responses are similar or nearly identical in quality, you should indicate a tie. Avoid position bias toward the first or second response. + +Suggested Steps for Evaluation: +1. Review both responses independently and then carefully compare their strengths and weaknesses. A good response should feature good language quality, follow the user instruction and meet as many criteria as possible. +2. After completing the first evaluation, swap the positions of response A and B and repeat Step 1 and get the 2nd evaluation outcome. This helps to mitigate the potential position bias. +3. After completing both evaluations (in the original and reversed order), combine your analysis and provide a final conclusion based on the overall assessment. If both responses are relatively similar, or the differences are minimal and hard to distinguish, your conclusion should indicate a tie ([[A=B]]). + +Your **conclusion** should be one of the following options (A, B are of the original order): +1. [[A>>B]]: Response A is clearly better than Response B. +2. [[A>B]]: Response A is slightly better than Response B. +3. [[A=B]]: Response A is nearly identical to Response B. +4. [[B>A]]: Response B is slightly better than Response A. +5. [[B>>A]]: Response B is clearly better than Response A. 
+
+User Instruction:\n[INSTRUCTIONS]\n{instructions}\n[END INSTRUCTIONS]\n\n
+Response A:\n[RESPONSE A]\n{reference_answer_by_gpt4o}\n[END RESPONSE A]\n\n
+Response B:\n[RESPONSE B]\n{prediction}\n[END RESPONSE B]\n\n
+Evaluation Criteria:\n[CRITERIA]\n{criteria}\n[END CRITERIA]\n\n
+
+Your output should include:
+1. Conclusion: Your final conclusion based on the overall assessment.
+2. Reasoning: Your reasoning process and analysis of the two responses.
+
+Your output should follow this format (CONCLUSION should be one of the five options: A>>B, A>B, A=B, B>A, B>>A):
+
+Final Conclusion: [[CONCLUSION]]
+Reasoning Process: [REASONING]\n
+""",
+
+    # Criteria Alignment w/o GT
+    'objective_without_gt':"""
+Please act as an impartial judge and evaluate the **Criteria Alignment** of the two responses provided by AI assistants to the user prompt. The responses were generated based on the provided instructions and visual input from images.
+
+Suggested Steps for Evaluation:
+1. Evaluate **Criteria Alignment** of both responses based on the criteria.
+    • If a criterion consists of **X aspects**, each aspect is worth **10 / X points**.
+    • For each aspect, there may be multiple sub-criteria. If there are **Y sub-criteria for the aspect**, each sub-criterion is worth **10 / (X * Y) points**.
+2. Assign a total score out of 10 for each response.
+
+User Instruction:\n[INSTRUCTIONS]\n{instructions}\n[END INSTRUCTIONS]\n\n
+Response A:\n[RESPONSE A]\n{reference_answer_by_gpt4o}\n[END RESPONSE A]\n\n
+Response B:\n[RESPONSE B]\n{prediction}\n[END RESPONSE B]\n\n
+Criteria:\n[CRITERIA]\n{criteria}\n[END CRITERIA]\n\n
+
+Your output should evaluate alignment scores of each response and end with a conclusion in the following format (The full score is 10. X, Y are alignment scores for Response A and B):
+
+Response A Alignment Score: X/10
+Response B Alignment Score: Y/10\n
+""",
+
+    # Criteria Alignment w. GT
+    'objective_with_gt':"""
+Please act as an impartial judge and evaluate the **Criteria Alignment** of the two responses provided by AI assistants to the user prompt. The responses were generated based on the provided instructions and visual input from images. There is also a ground truth corresponding to the instructions provided for reference.
+Take this context into account when making your judgment.
+
+Steps for Evaluation:
+1. Evaluate **Criteria Alignment** of both responses based on the criteria and the ground truth.
+    • If a criterion consists of **X aspects**, each aspect is worth **10 / X points**.
+    • For each aspect, there may be multiple sub-criteria. If there are **Y sub-criteria for the aspect**, each sub-criterion is worth **10 / (X * Y) points**.
+2. Assign a total score out of 10 for each response.
+
+User Instruction:\n[INSTRUCTIONS]\n{instructions}\n[END INSTRUCTIONS]\n\n
+Ground Truth:\n[GROUND TRUTH]\n{groundtruth}\n[END GROUND TRUTH]\n\n
+Response A:\n[RESPONSE A]\n{reference_answer_by_gpt4o}\n[END RESPONSE A]\n\n
+Response B:\n[RESPONSE B]\n{prediction}\n[END RESPONSE B]\n\n
+Criteria:\n[CRITERIA]\n{criteria}\n[END CRITERIA]\n\n
+
+Your output should evaluate alignment scores of each response and end with a conclusion in the following format (The full score is 10.
X, Y are alignment scores for Response A and B): + +Response A Alignment Score: X/10 +Response B Alignment Score: Y/10\n +""", +} + +prompt_dict['Creation_MMBench'] = { + # Subjective Judge [GPT-4o reference, with image] + 'subjective':""" +Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt below, considering both the provided criteria and the image. + +Your task is to carefully assess each response based on how well it meets the evaluation criteria, incorporating the visual context from the image. The criteria should be the primary basis for your judgment, with the image serving to complement and inform your analysis. + +Steps for Evaluation: + 1. Review Both Responses Independently: + Carefully analyze Assistant A’s and Assistant B’s responses with the criteria and the image. Do not assume any response is better just because it is listed first. Each response should be independently assessed based on the criteria and aided by images to help understand the context. + + 2. Compare the Strengths and Weaknesses: + After evaluating each response independently, compare the two. Consider both the quality of the content and how closely it aligns with the criteria and image. Identify the strengths and weaknesses of each response, and highlight the key differences. + + 3. Ensure Fairness: + To avoid positional bias, swap the positions of Assistant A and Assistant B after the first evaluation (i.e., make Assistant A become Assistant B and vice versa) and repeat the analysis and comparison. This ensures that each response is evaluated impartially under the same criteria. + + 4. Provide a Conclusion Based on Both Evaluations: + After completing both evaluations (original and swapped positions), combine your analysis to provide a final verdict. If the responses are similar, with only minimal differences, your judgment should reflect that and indicate a tie. + +Possible Verdict Options: + +• If Assistant A is clearly better in both evaluations: [[A>>B]] +• If Assistant A is slightly better in both evaluations: [[A>B]] +• If both responses are nearly identical, showing minimal differences and no clear advantage: [[A=B]] +• If Assistant B is slightly better in both evaluations: [[B>A]] +• If Assistant B is clearly better in both evaluations: [[B>>A]] + +Instructions to the AI Assistants: + +[INSTRUCTIONS] +{instructions} +[END INSTRUCTIONS] + +Assistant A Response: + +[ASSISTANT A] +{reference_answer_by_gpt4o} +[END ASSISTANT A] + +Evaluation Criteria: + +[CRITERIA] +{criteria} +[END CRITERIA] + +Assistant B Response: + +[ASSISTANT B] +{prediction} +[END ASSISTANT B] + +Output Format: + +Your output should include: + 1. Evaluation of Assistant A’s Response: Provide a detailed qualitative evaluation, focusing on how well Assistant A’s response aligns with the criteria and the image. + 2. Evaluation of Assistant B’s Response: Provide a detailed qualitative evaluation, focusing on how well Assistant B’s response aligns with the criteria and the image. + 3. 
Final Verdict: After considering both evaluations, select one of the following verdicts and justify it based on your analysis: + +Your output format should end like this: +Assistant A Evaluation: [qualitative comment] +Assistant B Evaluation: [qualitative comment] +Final Verdict is: [[VERDICT]] +""", + +##### For Visual Factuality + 'objective_without_gt':""" +Please act as an impartial judge and evaluate the **Visual Factuality** of the responses provided by two AI assistants to the user prompt displayed below. + +The responses were generated based on the provided instructions and visual input from images. Take this context into account when making your judgment. + +Steps for Evaluation: +1. Evaluate visual factuality for both responses based on the visual factuality criteria. + • If the visual factuality criteria consist of **X aspects**, each aspect is worth **10/X points**. + • For each aspect, there may be multiple small criteria. If there are **Y small criteria in one aspect**, each small criterion is worth **10/X/Y points**. +2. Assign a total score out of 10 for each response. + +Instructions to the AI assistants: +[INSTRUCTIONS] +{instructions} +[END INSTRUCTIONS] + +Assistant A response: +[ASSISTANT A] +{reference_answer_by_gpt4o} +[END ASSISTANT A] + +Visual Factuality Criteria: +[VISUAL FACTUALITY CRITERIA] +{criteria} +[END CRITERIA] + +Assistant B response: +[ASSISTANT B] +{prediction} +[END ASSISTANT B] + +Your output should evaluate visual factuality scores for each assistant and end like this: + +Response A Visual Factuality Score: X/10 +Response B Visual Factuality Score: Y/10 +""", + + 'objective_with_gt':""" +Please act as an impartial judge and evaluate the **Visual Factuality** of the responses provided by two AI assistants to the user prompt displayed below. + +The responses were generated based on the provided instructions and visual input from images. +There is a provided ground truth for the instructions, but the ground truth was not given to the AI assistants when generating their responses. +Take this context into account when making your judgment. + +Steps for Evaluation: +1. Evaluate visual factuality for both responses based on the provided ground truth and visual factuality criteria. + • If the visual factuality criteria consist of **X aspects**, each aspect is worth **10/X points**. + • For each aspect, there may be multiple small criteria. If there are **Y small criteria in one aspect**, each small criterion is worth **10/X/Y points**. +2. Assign a total score out of 10 for each response. 
+ +Instructions to the AI assistants: +[INSTRUCTIONS] +{instructions} +[END INSTRUCTIONS] + +Assistant A response: +[ASSISTANT A] +{reference_answer_by_gpt4o} +[END ASSISTANT A] + +Visual Factuality Criteria: +[VISUAL FACTUALITY CRITERIA] +{criteria} +[END CRITERIA] + +Assistant B response: +[ASSISTANT B] +{prediction} +[END ASSISTANT B] + +Ground truth: +[GROUND TRUTH] +{groundtruth} +[END GROUND TRUTH] + +Your output should evaluate visual factuality scores for each assistant and end like this: + +Response A Visual Factuality Score: X/10 +Response B Visual Factuality Score: Y/10 +""", +} + +creation_mmbench_category_dict = { + 'CATEGORY_Literary_Writing': [ + 'story_continue', + 'landscape_to_poem', + 'historical_story_creation', + 'story_novel_creation', + 'prose_writing_scenery', + 'art_inspired_prose', + 'daily_conversation_creation', + 'children_book_illustration_dialogue_creation' + ], + 'CATEGORY_Common_Functionality_Writing':[ + 'ins_simple_daily_copywriter', + 'travel_journal', + 'short_video_scripts_for_social_media', + 'social_media_travel_content', + 'daily_achievement_show_off', + 'scientific_research_simple_promotion', + 'twitter_comment_on_daily_news', + 'personal_event_summaries', + 'daily_affairs_inquiries', + 'business_collaborative_email_writing', + 'daily_emotional_email_writing', + 'letter_of_complaint', + 'daily_invitation_email_writing', + 'holiday_card_writing', + 'letter_of_application', + 'product_usage_experience_review', + 'store_experience_review', + 'public_welfare_activity_participation_initiative' + ], + 'CATEGORY_Professional_Functionality_Writing': [ + 'museum_guide_word_creation', + 'recipe_infer_and_guide', + 'landscape_introduction', + 'drafting_announcements_for_public_spaces', + 'floor_plan_renovation_design', + 'teaching_plan', + 'nutritional_formulation_of_recipe', + 'clothing_match_design', + 'software_engineering_diagram_explanation', + 'event_planning_and_venue_arrangement', + 'ui_design_analysis_and_optimization', + 'attraction_promotional_words', + 'product_marketing_strategy', + 'script_writing_for_product_advertisement_promotional_video', + 'residence_reasoning', + 'scientific_diagram_understanding', + 'pulitzer_prize_judge', + 'architecture_appreciation', + 'company_team_amuse_broadcast' + ], + 'CATEGORY_Creative_Multimodal_Understanding': [ + 'travel_itinerary_planning_and_recommendations', + 'photography_appreciation', + 'meme_explanation', + 'advertisement_explanation', + 'document_understanding', + 'snapshot_analysis' + ] + +} + +def is_criteria_valid(criteria): + import re + for value in criteria.values(): + if value == '\\' or value == '' or not re.search('[a-zA-Z]', value): + return False + return True + +key_mapping = { + "sub_parse_ok": "preference_parse_ok", + "sub_dist": "preference_dist", + "win_rate": "win_rate", + "sub_reward": "reward", + "obj_parse_ok": "visual_factuality_parse_ok", + "obj_score": "visual_factuality_score", + "obj_ref_score": "visual_factuality_ref_score" +} + +def rename_keys(data, key_mapping): + if isinstance(data, dict): + new_data = {} + for key, value in data.items(): + new_key = key_mapping.get(key, key) + new_data[new_key] = rename_keys(value, key_mapping) + return new_data + elif isinstance(data, list): + return [rename_keys(item, key_mapping) for item in data] + else: + return data + + +def build_prompt(line, dataset_name): + try: + criteria = eval(line['criteria']) + except: + criteria = line['criteria'] + + if isinstance(criteria, dict): + new_criteria = {} + for k in criteria: + if 'subjective' 
in k.lower(): + new_criteria['subjective'] = criteria[k] + else: + new_criteria['objective'] = criteria[k] + else: + assert isinstance(criteria, str) + new_criteria = {'subjective': criteria} + criteria = new_criteria + assert 'subjective' in criteria, 'No subjective criteria found in the criteria dict' + + prompts = {} + if listinstr(['Creation_MMBench'], dataset_name): + dataset_name = 'Creation_MMBench' + prompts['subjective'] = prompt_dict[dataset_name]['subjective'].format( + instructions=line['question'], + criteria=criteria['subjective'], + reference_answer_by_gpt4o=line['reference_answer_by_gpt4o'], + prediction=line['prediction'] + ) + if 'objective' in criteria: + if 'ground_truth' in line and (not pd.isna(line['ground_truth'])) and line['ground_truth'] != '': + prompts['objective'] = prompt_dict[dataset_name]['objective_with_gt'].format( + instructions=line['question'], + criteria=criteria['objective'], + groundtruth=line['ground_truth'], + reference_answer_by_gpt4o=line['reference_answer_by_gpt4o'], + prediction=line['prediction']) + else: + prompts['objective'] = prompt_dict[dataset_name]['objective_without_gt'].format( + instructions=line['question'], + criteria=criteria['objective'], + reference_answer_by_gpt4o=line['reference_answer_by_gpt4o'], + prediction=line['prediction']) + return prompts + + +def Generate_Creation_MMBench_judge(model, image_list, prompt): + assert isinstance(prompt, dict) + response = {} + for key in prompt.keys(): + if image_list and key == 'subjective': + input_msg = [] + for img_path in image_list: + if read_ok(img_path): + input_msg.append({'type': 'image', 'value': img_path}) + else: + raise ValueError(f"Image not found: {img_path}") + input_msg.append({'type': 'text', 'value': prompt[key]}) + # print(f'using image {image_list} and text') + response[key] = model.generate(input_msg) + else: + response[key] = model.generate(prompt[key]) + return response + + +def extract_subjective(inp, dataset_name): + mapping_dict = { + 'LiveMMBench_Creation': 'FINAL CONCLUSION:', + 'Creation_MMBench': 'FINAL VERDICT IS:' + } + lines = inp.split('\n') + for line in lines: + line = line.upper() + if line.startswith(mapping_dict[dataset_name]): + rem = line.split(mapping_dict[dataset_name])[1].strip() + rem = rem.split('[[')[1].split(']]')[0].strip() + cands = [ + 'A>>B', 'A>B', 'A=B', 'B>A', 'B>>A', + 'B<>A' in text: + return 2 + elif 'AA' in text: + return 1 + elif 'A=B' in text or 'B=A' in text: + return 0 + elif 'A>B' in text or 'B>B' in text or 'B< 0 and not self.is_api: + concatenated_images = concat_images(tgt_path, max_concat=self.concat_num, column_num=self.column_num) + + old_tgt_path = tgt_path + assert isinstance(old_tgt_path, list) + if self.column_num != -1: + tgt_path = [ + '_'.join(old_tgt_path[0].split('_')[:-1]) + '_concat{}_{}.jpg'.format(self.concat_num, i) + for i in range(len(concatenated_images)) + ] + else: + tgt_path = ['_'.join(old_tgt_path[0].split('_')[:-1]) + '_concat_all.jpg'] + + for path, concatenated_image in zip(tgt_path, concatenated_images): + if not read_ok(path): + decode_base64_to_image_file(encode_image_to_base64(concatenated_image), path) + num_images, image_size = len(old_tgt_path), concatenated_image.size + print('concat {} images to a new one with size {}. 
save at {}'.format(num_images, image_size, path)) + return tgt_path + + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + logger = get_logger('Evaluation') + model = judge_kwargs['model'] + + suffix = eval_file.split('.')[-1] + storage = eval_file.replace(f'.{suffix}', f'_{model}.xlsx') + tmp_file = eval_file.replace(f'.{suffix}', f'_{model}.pkl') + + if osp.exists(storage): + logger.warning(f'GPT scoring file {storage} already exists, will reuse it in DUDE_eval. ') + else: + data = load(eval_file) + model = build_judge(max_tokens=128, **judge_kwargs) + lt = len(data) + lines = [data.iloc[i] for i in range(lt)] + tups = [(model, line) for line in lines] + indices = [line['index'] for line in lines] + + ans = {} + if osp.exists(tmp_file): + ans = load(tmp_file) + tups = [x for x, i in zip(tups, indices) if i not in ans] + indices = [i for i in indices if i not in ans] + + if len(indices): + new_results = list() + for model, line in tqdm(tups): + res = MMLongBench_auxeval(model, line) + new_results.append(res) + + log_map, res_map, pred_map = {}, {}, {} + all_inds = [line['index'] for line in lines] + for k, v in zip(all_inds, new_results): + log_map[k] = v['log'] + res_map[k] = v['res'] + pred_map[k] = v['pred'] + data['res'] = [res_map[idx] for idx in data['index']] + data['log'] = [log_map[idx] for idx in data['index']] + data['pred'] = [pred_map[idx] for idx in data['index']] + dump(data, storage) + + score = DUDE_acc(storage) + score_pth = storage.replace('.xlsx', '_score.csv') + + dump(score, score_pth) + logger.info(f'DUDE successfully finished evaluating {eval_file}, results saved in {score_pth}') + logger.info('Score: ') + logger.info(score) diff --git a/vlmeval/dataset/dynamath.py b/vlmeval/dataset/dynamath.py new file mode 100644 index 0000000000000000000000000000000000000000..cf6979716c6eb3bd8d391bb1830f5416ae16bf75 --- /dev/null +++ b/vlmeval/dataset/dynamath.py @@ -0,0 +1,259 @@ +import re +import json +import numpy as np +import pandas as pd +import sys +import math +import os +import os.path as osp +import argparse + +from .image_base import ImageBaseDataset +from .utils import build_judge +from ..utils import track_progress_rich +from ..smp import load, dump, d2df, toliststr + + +def preprocess(str1): + str1 = str(str1) + if 0 <= str1.find("{") < str1.rfind("}"): + str1 = str1[str1.find("{"): str1.rfind("}") + 1] + str2 = str1.replace("\\", "") + str2 = str2.replace("\\n", "\n") + return str2 + + +def transfer(str1): + if "\u03c0" in str1: + strs = str1.split('\u03c0') + str1 = strs[0] + return float(str1) * np.pi + else: + return float(str1) + +def extract_options(text): + pattern = r'\((\w)\)\s([^()]+)' + try: + text = str(text) + matches = re.findall(pattern, text) + options_dict = {option: description.strip() for option, description in matches} + return options_dict + except: + return {} + +def parse_answer(answer, answer_type="multiple choice", question=None): + if answer_type == "float": + if answer.isdigit(): + return True, float(answer) + else: + parts = answer.split(' ') + answer = parts[0] + try: + answer = transfer(answer) + return True, answer + except: + return False, None + elif answer_type == "multiple choice": + if len(answer) == 1 and answer.upper() in 'ABCDE': + return True, answer.upper() + else: + options = extract_options(question) + options = {v: k for k, v in options.items()} + if answer.strip().lower() in options: + return True, options[answer.strip().lower()] + return False, None + else: + return True, answer + + +def 
DynaMath_auxeval(model, line): + pred = line['prediction'] + pred = preprocess(pred) + + succeed, short_answer = None, None + try: + dj = json.loads(pred, strict=False) + short_answer = dj.get("short answer") + assert short_answer is not None + succeed, short_answer = parse_answer(short_answer, answer_type=line['answer_type']) + assert succeed + except: + # Failed to parse the JSON, use an auxiliary LLM to get the short answer + if line['answer_type'] == 'multiple choice': + inst = "Output the corresponing choice option, such as 'A', 'B', 'C', 'D', in a single line. Output 'Z' if the answer is not in the options." + elif line['answer_type'] == 'float': + inst = "Output a three-digit floating-point number in a single line." + else: + inst = ( + "Output a short answer in a single line. Any float numbers in the answer " + "should be formatted as three-digit floating-point numbers." + ) + if line['answer_type'] == 'multiple choice': + options = extract_options(line['question']) + opt = "" + for k, v in options.items(): + opt += f"({k}) {v} " + prompt = f"Free-form answer: {pred}\nOptions:{opt}\nInstruction: {inst}" + else: + prompt = f"Free-form answer: {pred}\nInstruction: {inst}" + response = pred + succeed, short_answer = parse_answer(response, line['answer_type'], line['question']) + if not succeed: + response = model.generate(prompt) + succeed, short_answer = parse_answer(response, line['answer_type']) + + if line['answer_type'] == 'float': + if succeed: + diff = float(short_answer) - float(line['answer']) + if abs(diff) <= 0.001: + return dict(parse=True, extracted=short_answer, correct=True) + else: + return dict(parse=True, extracted=short_answer, correct=False) + else: + return dict(parse=False, extracted=None, correct=False) + elif line['answer_type'] == 'multiple choice': + if succeed: + return dict(parse=True, extracted=short_answer, correct=(short_answer == line['answer'])) + else: + if line['answer'] in pred[:3].upper(): + return dict(parse=False, extracted=None, correct=True) + else: + return dict(parse=False, extracted=None, correct=False) + else: + if succeed: + return dict(parse=True, extracted=short_answer, correct=(short_answer.lower() in line['answer'].lower())) + else: + return dict(parse=False, extracted=None, correct=(short_answer.lower() in line['answer'].lower())) + + +class Dynamath(ImageBaseDataset): + + TYPE = 'VQA' + DATASET_URL = { + 'DynaMath': 'https://opencompass.openxlab.space/utils/VLMEval/DynaMath.tsv', + 'DynaMath_noprompt': 'https://opencompass.openxlab.space/utils/VLMEval/DynaMath.tsv', + } + DATASET_MD5 = { + 'DynaMath': 'b8425ad9a7114571fc9366e013699494', + 'DynaMath_noprompt': 'b8425ad9a7114571fc9366e013699494', + } + GUIDE = """ +## Answer Instruction Please provide an answer to the question outlined above. Your response should adhere \ +to the following JSON format, which includes two keys: 'solution' and 'short answer'. The 'solution' key can contain \ +detailed steps needed to solve the question, and the 'short answer' key should provide a concise response. 
{INST} + +Example of expected JSON response format: + +""" + EXAMPLE = { + "solution": "[Detailed step-by-step explanation]", + "short answer": "[Concise Answer]" + } + TEXT_EXAMPLE = json.dumps(EXAMPLE, indent=4) + + # Given one data record, return the built prompt (a multi-modal message), can override + def build_prompt(self, line): + if isinstance(line, int): + line = self.data.iloc[line] + + if self.meta_only: + tgt_path = toliststr(line['image_path']) + else: + tgt_path = self.dump_image(line) + + prompt = f"## Question\n {line['question']}" + if line['answer_type'] == 'multiple choice': + inst = "Provide the corresponing choice option in the 'short answer' key, such as 'A', 'B', 'C', or 'D'." + elif line['answer_type'] == 'float': + inst = "Format the answer as a three-digit floating-point number and provide it in the 'short answer' key." + else: + inst = "Float numbers in the answer should be formatted as three-digit floating-point numbers." + + if 'noprompt' not in self.dataset_name: + prompt = prompt + self.GUIDE.format(INST=inst) + self.TEXT_EXAMPLE + + msgs = [] + if isinstance(tgt_path, list): + msgs.extend([dict(type='image', value=p) for p in tgt_path]) + else: + msgs = [dict(type='image', value=tgt_path)] + msgs.append(dict(type='text', value=prompt)) + return msgs + + def evaluate(self, eval_file, **judge_kwargs): + judge_name = judge_kwargs.pop('model', 'gpt-4o-mini') + + model = build_judge(model=judge_name, **judge_kwargs) + suffix = eval_file.split('.')[-1] + + storage = eval_file.replace(f'.{suffix}', f'_{judge_name}.xlsx') # noqa: F841 + score_file = eval_file.replace(f'.{suffix}', f'_{judge_name}_score.csv') # noqa: F841 + tmp_file = eval_file.replace(f'.{suffix}', f'_{judge_name}.pkl') # noqa: F841 + nproc = judge_kwargs.pop('nproc', 6) # noqa: F841 + + res = load(tmp_file) if os.path.exists(tmp_file) else {} + res = {k: v for k, v in res.items() if v is not None} + + model.system_prompt = """\ +You are a helpful assistant that helps me to format free-form answers into a short answer according to the instruction. 
+""" + if not osp.exists(storage): + data = load(eval_file) + lt = len(data) + payloads = [dict(model=model, line=data.iloc[i]) for i in range(lt) if data.iloc[i]['index'] not in res] + keys = [idx for idx in data['index'] if idx not in res] + + if len(keys): + results = track_progress_rich(DynaMath_auxeval, payloads, nproc=nproc, save=tmp_file, keys=keys) + for k, r in zip(keys, results): + res[k] = r + + data['parse'] = [res[idx]['parse'] for idx in data['index']] + data['extracted'] = [res[idx]['extracted'] for idx in data['index']] + data['correct'] = [res[idx]['correct'] for idx in data['index']] + dump(data, storage) + + data = load(storage) + # Calculate Average Accuracy + score_avg = {} + score_avg['Overall'] = np.mean(data['correct']) + + subs = set(data['subject']) + for sub in subs: + data_sub = data[data['subject'] == sub] + score_avg[f'Subject-{sub}'] = np.mean(data_sub['correct']) + + lvls = set(data['knowledge_level']) + for lvl in lvls: + data_lvl = data[data['knowledge_level'] == lvl] + score_avg[f'Level-{lvl}'] = np.mean(data_lvl['correct']) + + # Calculate the Worst Case Accuracy + score_worst = {} + data_worst = data[data['varid'] == 1] + qid2corr = {idx: True for idx in data_worst['index']} + lt = len(data) + for i in range(lt): + item = data.iloc[i] + qid2corr[item['qid']] *= item['correct'] + data_worst['correct'] = [qid2corr[idx] for idx in data_worst['qid']] + score_worst['Overall'] = np.mean(data_worst['correct']) + + subs = set(data_worst['subject']) + for sub in subs: + data_sub = data_worst[data_worst['subject'] == sub] + score_worst[f'Subject-{sub}'] = np.mean(data_sub['correct']) + + lvls = set(data_worst['knowledge_level']) + for lvl in lvls: + data_lvl = data_worst[data_worst['knowledge_level'] == lvl] + score_worst[f'Level-{lvl}'] = np.mean(data_lvl['correct']) + + d1 = {'Setting': 'Average'} + d1.update(score_avg) + d2 = {'Setting': 'Worst Case'} + d2.update(score_worst) + score = pd.concat([d2df(d1), d2df(d2)], ignore_index=True) + + dump(score, score_file) + return score diff --git a/vlmeval/dataset/emma.py b/vlmeval/dataset/emma.py new file mode 100644 index 0000000000000000000000000000000000000000..2aa3eb763bfde82f51b727348a33c894e21909b2 --- /dev/null +++ b/vlmeval/dataset/emma.py @@ -0,0 +1,55 @@ +from vlmeval import * +from .image_shortqa import ImageShortQADataset +from .image_mcq import MMMUDataset + +class EMMADataset(ImageShortQADataset): + + COT_INST = "Please solve the problem step by step. " + DIRECT_INST = "Please ensure that your output only contains the final answer without any additional content (such as intermediate reasoning steps)." + MCQ_FMT = "{context}\n\n{question}\n\n{options}\n\nAnswer with the option's letter from the given choices. " + OPEN_FMT = "{context}\n\n{question}\n\nAnswer the question using a single word or phrase. 
" + + DATASET_URL = { + 'EMMA': 'https://opencompass.openxlab.space/utils/VLMEval/EMMA.tsv', + 'EMMA_COT': 'https://opencompass.openxlab.space/utils/VLMEval/EMMA.tsv' + } + + def build_prompt(self, line): + if isinstance(line, int): + line = self.data.iloc[line] + + if self.meta_only: + tgt_path = toliststr(line['image_path']) + else: + tgt_path = self.dump_image(line) + + context = line['context'] + question = line['question'] + example = "" + res_dict = {} + if line['type'] == 'MCQ': + for ch in string.ascii_uppercase: + if ch in line and not pd.isna(line[ch]): + example += f"{ch}: {line[ch]}\n" + + prompt_tmpl = EMMADataset.MCQ_FMT + if not pd.isna(context) and context is not None: + prompt = prompt_tmpl.format(context=context, question=question, options=example) + else: + prompt = prompt_tmpl.split('{context}\n\n')[1].format(question=question, options=example) + prompt += EMMADataset.COT_INST if 'COT' in self.dataset_name else EMMADataset.DIRECT_INST + else: + prompt_tmpl = EMMADataset.OPEN_FMT + if not pd.isna(context) and context is not None: + prompt = prompt_tmpl.format(context=context, question=question) + else: + prompt = prompt_tmpl.split('{context}\n\n')[1].format(question=question) + prompt += EMMADataset.COT_INST if 'COT' in self.dataset_name else EMMADataset.DIRECT_INST + + msgs = [] + if isinstance(tgt_path, list): + msgs.extend([dict(type='image', value=p) for p in tgt_path]) + else: + msgs = [dict(type='image', value=tgt_path)] + msgs.append(dict(type='text', value=prompt)) + return MMMUDataset.split_MMMU(msgs) diff --git a/vlmeval/dataset/image_base.py b/vlmeval/dataset/image_base.py new file mode 100644 index 0000000000000000000000000000000000000000..e13d136dd5ec723ea5f9838c172ea16c7704d3dc --- /dev/null +++ b/vlmeval/dataset/image_base.py @@ -0,0 +1,175 @@ +import pandas as pd +from abc import abstractmethod +from ..smp import * + + +def img_root_map(dataset): + if 'MM_NIAH' in dataset: + return 'MMNIAH' + if 'CRPE' in dataset: + return 'CRPE' + if 'OCRVQA' in dataset: + return 'OCRVQA' + if 'COCO_VAL' == dataset: + return 'COCO' + if 'MMMU' in dataset: + return 'MMMU' + if "QSpatial" in dataset: + return "QSpatial" + + mmbench_root_map = { + 'MMBench_DEV_EN': 'MMBench', 'MMBench_TEST_EN': 'MMBench', + 'MMBench_DEV_CN': 'MMBench', 'MMBench_TEST_CN': 'MMBench', + 'MMBench': 'MMBench', 'MMBench_CN': 'MMBench', + 'MMBench_DEV_EN_V11': 'MMBench_V11', 'MMBench_TEST_EN_V11': 'MMBench_V11', + 'MMBench_DEV_CN_V11': 'MMBench_V11', 'MMBench_TEST_CN_V11': 'MMBench_V11', + 'MMBench_V11': 'MMBench', 'MMBench_CN_V11': 'MMBench', + } + if dataset in mmbench_root_map: + return mmbench_root_map[dataset] + return dataset + + +class ImageBaseDataset: + + MODALITY = 'IMAGE' + DATASET_URL = {} + DATASET_MD5 = {} + + def __init__(self, dataset='MMBench', skip_noimg=True): + ROOT = LMUDataRoot() + # You can override this variable to save image files to a different directory + self.dataset_name = dataset + self.img_root = osp.join(ROOT, 'images', img_root_map(dataset)) + + data = self.load_data(dataset) + self.skip_noimg = skip_noimg + if skip_noimg and 'image' in data: + data = data[~pd.isna(data['image'])] + + data['index'] = [str(x) for x in data['index']] + + self.meta_only = True + + # The image field can store the base64 encoded image or another question index (for saving space) + if 'image' in data: + data['image'] = [str(x) for x in data['image']] + image_map = {x: y for x, y in zip(data['index'], data['image'])} + for k in image_map: + if len(image_map[k]) <= 64: + idx = image_map[k] 
+ assert idx in image_map and len(image_map[idx]) > 64 + image_map[k] = image_map[idx] + + images = [toliststr(image_map[k]) for k in data['index']] + data['image'] = [x[0] if len(x) == 1 else x for x in images] + self.meta_only = False + + if 'image_path' in data: + paths = [toliststr(x) for x in data['image_path']] + data['image_path'] = [x[0] if len(x) == 1 else x for x in paths] + + if np.all([istype(x, int) for x in data['index']]): + data['index'] = [int(x) for x in data['index']] + + self.data = data + self.post_build(dataset) + + def __len__(self): + return len(self.data) + + def __getitem__(self, idx): + return dict(self.data.iloc[idx]) + + def prepare_tsv(self, url, file_md5=None): + data_root = LMUDataRoot() + os.makedirs(data_root, exist_ok=True) + update_flag = False + file_name = url.split('/')[-1] + data_path = osp.join(data_root, file_name) + self.data_path=data_path + if osp.exists(data_path) and (file_md5 is None or md5(data_path) == file_md5): + pass + else: + warnings.warn('The dataset tsv is not downloaded') + download_file(url, data_path) + update_flag = True + + if file_size(data_path, 'GB') > 1: + local_path = data_path.replace('.tsv', '_local.tsv') + if not osp.exists(local_path) or os.environ.get('FORCE_LOCAL', None) or update_flag: + from ..tools import LOCALIZE + LOCALIZE(data_path, local_path) + data_path = local_path + return load(data_path) + + def dump_image(self, line): + os.makedirs(self.img_root, exist_ok=True) + + if 'image' in line: + if isinstance(line['image'], list): + tgt_path = [] + assert 'image_path' in line + for img, im_name in zip(line['image'], line['image_path']): + path = osp.join(self.img_root, im_name) + if not read_ok(path): + decode_base64_to_image_file(img, path) + tgt_path.append(path) + else: + tgt_path = osp.join(self.img_root, f"{line['index']}.jpg") + if not read_ok(tgt_path): + decode_base64_to_image_file(line['image'], tgt_path) + tgt_path = [tgt_path] + else: + assert 'image_path' in line + tgt_path = toliststr(line['image_path']) + + return tgt_path + + def display(self, line): + if isinstance(line, int): + line = self.data.iloc[line] + assert isinstance(line, pd.Series) or isinstance(line, dict) + mmqa_display(line) + + # Return a list of dataset names that are supported by this class, can override + @classmethod + def supported_datasets(cls): + return list(cls.DATASET_URL) + + # Given the dataset name, return the dataset as a pandas dataframe, can override + def load_data(self, dataset): + url = self.DATASET_URL.get(dataset, None) + if url is None or url == '': + url = dataset + '.tsv' + file_md5 = self.DATASET_MD5[dataset] if dataset in self.DATASET_MD5 else None + return self.prepare_tsv(url, file_md5) + + # Post built hook, will be called after the dataset is built, can override + def post_build(self, dataset): + pass + + # Given one data record, return the built prompt (a multi-modal message), can override + def build_prompt(self, line): + if isinstance(line, int): + line = self.data.iloc[line] + + if self.meta_only: + tgt_path = toliststr(line['image_path']) + else: + tgt_path = self.dump_image(line) + + question = line['question'] + + msgs = [] + if isinstance(tgt_path, list): + msgs.extend([dict(type='image', value=p) for p in tgt_path]) + else: + msgs = [dict(type='image', value=tgt_path)] + msgs.append(dict(type='text', value=question)) + return msgs + + # Given the prediction file, return the evaluation results in the format of a dictionary or pandas dataframe + @abstractmethod + def evaluate(self, eval_file, 
**judge_kwargs): + pass diff --git a/vlmeval/dataset/image_caption.py b/vlmeval/dataset/image_caption.py new file mode 100644 index 0000000000000000000000000000000000000000..23282805c8d63047c57d02266cec21b48e191196 --- /dev/null +++ b/vlmeval/dataset/image_caption.py @@ -0,0 +1,75 @@ +from .image_base import ImageBaseDataset +from ..smp import * + + +class COCO_Caption_Scorer(): + def __init__(self, ref, gt): + from pycocoevalcap.bleu.bleu import Bleu + from pycocoevalcap.rouge.rouge import Rouge + from pycocoevalcap.cider.cider import Cider + + self.ref = ref + self.gt = gt + print('setting up scorers...') + self.scorers = [ + (Bleu(4), ['Bleu_1', 'Bleu_2', 'Bleu_3', 'Bleu_4']), + (Rouge(), 'ROUGE_L'), + (Cider(), 'CIDEr'), + ] + + def compute_scores(self): + total_scores = {} + for scorer, method in self.scorers: + print('computing %s score...' % (scorer.method())) + score, scores = scorer.compute_score(self.gt, self.ref) + if isinstance(method, list): + for sc, scs, m in zip(score, scores, method): + print('%s: %0.3f' % (m, sc * 100)) + total_scores['Bleu'] = [x * 100 for x in score] + else: + print('%s: %0.3f' % (method, score * 100)) + total_scores[method] = score * 100 + + print('*****DONE*****') + for key, value in total_scores.items(): + print('{}:{}'.format(key, value)) + return total_scores + + +class ImageCaptionDataset(ImageBaseDataset): + + TYPE = 'Caption' + + DATASET_URL = { + 'COCO_VAL': 'https://opencompass.openxlab.space/utils/VLMEval/COCO_VAL.tsv', + } + + DATASET_MD5 = { + 'COCO_VAL': '72a5079dead060269ac222c5aa5128af', + } + + def load_data(self, dataset): + data = super().load_data(dataset) + if 'question' not in data: + data['question'] = [( + 'Please describe this image in general. Directly provide the description, ' + 'do not include prefix like "This image depicts". ' + )] * len(data) + return data + + # It returns a dictionary of scores + @classmethod + def evaluate(self, eval_file, **kwargs): + data = load(eval_file) + lt = len(data) + lines = [data.iloc[i] for i in range(lt)] + ref, gt = {}, {} + for i, line in enumerate(lines): + ref[str(i)] = [str(line['prediction'])] + gt[str(i)] = eval(line['answer']) + + scorer = COCO_Caption_Scorer(ref, gt) + coco_caption_score_dict = scorer.compute_scores() + score_pth = eval_file.replace('.xlsx', '_score.json') + dump(coco_caption_score_dict, score_pth) + return coco_caption_score_dict diff --git a/vlmeval/dataset/image_ccocr.py b/vlmeval/dataset/image_ccocr.py new file mode 100644 index 0000000000000000000000000000000000000000..aa1d7e26edcf77fd216fa13ce41d6c8ba00f4106 --- /dev/null +++ b/vlmeval/dataset/image_ccocr.py @@ -0,0 +1,197 @@ +# flake8: noqa + +import os +import re +import tempfile +from functools import partial +import pandas as pd + +from .image_base import ImageBaseDataset +from ..smp import * + +# should be the same as FAIL_MSG definded in vlmeval/inference.py +FAIL_MSG = 'Failed to obtain answer via API.' 
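+# Descriptive note (added): CCOCRDataset below covers the CC-OCR subsets (doc parsing, KIE, multi-language OCR,
+# multi-scene OCR); each subset TSV is mirrored on ModelScope and Hugging Face, and DATASET_URL picks the default mirror.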
+ + +class CCOCRDataset(ImageBaseDataset): + TYPE = 'VQA' + DATASET_URL_MODELSCOPE = { + "CCOCR_DocParsing_DocPhotoChn": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/doc/doc_photo_chn_75.tsv", + "CCOCR_DocParsing_DocPhotoEng": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/doc/doc_photo_eng_75.tsv", + "CCOCR_DocParsing_DocScanChn": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/doc/doc_scan_chn_75.tsv", + "CCOCR_DocParsing_DocScanEng": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/doc/doc_scan_eng_75.tsv", + "CCOCR_DocParsing_TablePhotoChn": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/table/table_photo_chn_75.tsv", + "CCOCR_DocParsing_TablePhotoEng": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/table/table_photo_eng_75.tsv", + "CCOCR_DocParsing_TableScanChn": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/table/table_scan_chn_75.tsv", + "CCOCR_DocParsing_TableScanEng": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/table/table_scan_eng_75.tsv", + "CCOCR_DocParsing_MolecularHandwriting": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/molecular/molecular_handwriting_100.tsv", + "CCOCR_DocParsing_FormulaHandwriting": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/formula/formula_handwriting_100.tsv", + "CCOCR_Kie_Sroie2019Word": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/kie/constrained_category/sroie2019_word_347.tsv", + "CCOCR_Kie_Cord": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/kie/constrained_category/CORD_100.tsv", + "CCOCR_Kie_EphoieScut": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/kie/constrained_category/EPHOIE_SCUT_311.tsv", + "CCOCR_Kie_Poie": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/kie/constrained_category/POIE_250.tsv", + "CCOCR_Kie_ColdSibr": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/kie/open_category/COLD_SIBR_400.tsv", + "CCOCR_Kie_ColdCell": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/kie/open_category/COLD_CELL_600.tsv", + "CCOCR_MultiLanOcr_Arabic": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Arabic/Arabic_150.tsv", + "CCOCR_MultiLanOcr_French": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/French/French_150.tsv", + "CCOCR_MultiLanOcr_German": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/German/German_150.tsv", + "CCOCR_MultiLanOcr_Italian": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Italian/Italian_150.tsv", + "CCOCR_MultiLanOcr_Japanese": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Japanese/Japanese_150.tsv", + "CCOCR_MultiLanOcr_Korean": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Korean/Korean_150.tsv", + "CCOCR_MultiLanOcr_Portuguese": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Portuguese/Portuguese_150.tsv", + "CCOCR_MultiLanOcr_Russian": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Russian/Russian_150.tsv", + "CCOCR_MultiLanOcr_Spanish": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Spanish/Spanish_150.tsv", + "CCOCR_MultiLanOcr_Vietnamese": 
"https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Vietnamese/Vietnamese_150.tsv", + "CCOCR_MultiSceneOcr_Cord": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/document_text/CORD_100.tsv", + "CCOCR_MultiSceneOcr_Funsd": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/document_text/FUNSD_50.tsv", + "CCOCR_MultiSceneOcr_Iam": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/document_text/IAM_50.tsv", + "CCOCR_MultiSceneOcr_ZhDoc": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/document_text/zh_doc_100.tsv", + "CCOCR_MultiSceneOcr_ZhHandwriting": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/document_text/zh_handwriting_50.tsv", + "CCOCR_MultiSceneOcr_Hieragent": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/scene_text/Hieragent_100.tsv", + "CCOCR_MultiSceneOcr_Ic15": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/scene_text/IC15_500.tsv", + "CCOCR_MultiSceneOcr_Inversetext": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/scene_text/InverseText_500.tsv", + "CCOCR_MultiSceneOcr_Totaltext": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/scene_text/TotalText_300.tsv", + "CCOCR_MultiSceneOcr_ZhScene": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/scene_text/zh_scene_450.tsv", + "CCOCR_MultiSceneOcr_UgcLaion": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/ugc_text/ugc_laion_400.tsv", + "CCOCR_MultiSceneOcr_ZhDense": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/ugc_text/zh_dense_50.tsv", + "CCOCR_MultiSceneOcr_ZhVertical": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/ugc_text/zh_vertical_100.tsv" + } + + DATASET_URL_HUGGINGFACE = { + "CCOCR_DocParsing_DocPhotoChn": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/doc/doc_photo_chn_75.tsv", + "CCOCR_DocParsing_DocPhotoEng": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/doc/doc_photo_eng_75.tsv", + "CCOCR_DocParsing_DocScanChn": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/doc/doc_scan_chn_75.tsv", + "CCOCR_DocParsing_DocScanEng": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/doc/doc_scan_eng_75.tsv", + "CCOCR_DocParsing_TablePhotoChn": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/table/table_photo_chn_75.tsv", + "CCOCR_DocParsing_TablePhotoEng": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/table/table_photo_eng_75.tsv", + "CCOCR_DocParsing_TableScanChn": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/table/table_scan_chn_75.tsv", + "CCOCR_DocParsing_TableScanEng": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/table/table_scan_eng_75.tsv", + "CCOCR_DocParsing_MolecularHandwriting": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/molecular/molecular_handwriting_100.tsv", + "CCOCR_DocParsing_FormulaHandwriting": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/formula/formula_handwriting_100.tsv", + "CCOCR_Kie_Sroie2019Word": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/kie/constrained_category/sroie2019_word_347.tsv", + "CCOCR_Kie_Cord": 
"https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/kie/constrained_category/CORD_100.tsv", + "CCOCR_Kie_EphoieScut": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/kie/constrained_category/EPHOIE_SCUT_311.tsv", + "CCOCR_Kie_Poie": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/kie/constrained_category/POIE_250.tsv", + "CCOCR_Kie_ColdSibr": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/kie/open_category/COLD_SIBR_400.tsv", + "CCOCR_Kie_ColdCell": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/kie/open_category/COLD_CELL_600.tsv", + "CCOCR_MultiLanOcr_Arabic": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Arabic/Arabic_150.tsv", + "CCOCR_MultiLanOcr_French": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/French/French_150.tsv", + "CCOCR_MultiLanOcr_German": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/German/German_150.tsv", + "CCOCR_MultiLanOcr_Italian": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Italian/Italian_150.tsv", + "CCOCR_MultiLanOcr_Japanese": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Japanese/Japanese_150.tsv", + "CCOCR_MultiLanOcr_Korean": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Korean/Korean_150.tsv", + "CCOCR_MultiLanOcr_Portuguese": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Portuguese/Portuguese_150.tsv", + "CCOCR_MultiLanOcr_Russian": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Russian/Russian_150.tsv", + "CCOCR_MultiLanOcr_Spanish": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Spanish/Spanish_150.tsv", + "CCOCR_MultiLanOcr_Vietnamese": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Vietnamese/Vietnamese_150.tsv", + "CCOCR_MultiSceneOcr_Cord": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/document_text/CORD_100.tsv", + "CCOCR_MultiSceneOcr_Funsd": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/document_text/FUNSD_50.tsv", + "CCOCR_MultiSceneOcr_Iam": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/document_text/IAM_50.tsv", + "CCOCR_MultiSceneOcr_ZhDoc": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/document_text/zh_doc_100.tsv", + "CCOCR_MultiSceneOcr_ZhHandwriting": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/document_text/zh_handwriting_50.tsv", + "CCOCR_MultiSceneOcr_Hieragent": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/scene_text/Hieragent_100.tsv", + "CCOCR_MultiSceneOcr_Ic15": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/scene_text/IC15_500.tsv", + "CCOCR_MultiSceneOcr_Inversetext": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/scene_text/InverseText_500.tsv", + "CCOCR_MultiSceneOcr_Totaltext": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/scene_text/TotalText_300.tsv", + "CCOCR_MultiSceneOcr_ZhScene": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/scene_text/zh_scene_450.tsv", + "CCOCR_MultiSceneOcr_UgcLaion": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/ugc_text/ugc_laion_400.tsv", + "CCOCR_MultiSceneOcr_ZhDense": 
"https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/ugc_text/zh_dense_50.tsv", + "CCOCR_MultiSceneOcr_ZhVertical": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/ugc_text/zh_vertical_100.tsv" + } + + # define data path + DATASET_URL = DATASET_URL_MODELSCOPE + DATASET_MD5 = { + "CCOCR_DocParsing_DocPhotoChn": "9039dcbb31830d413261a95cfa29d97f", + "CCOCR_DocParsing_DocPhotoEng": "2ca0824881e1d7317626f2a19d902989", + "CCOCR_DocParsing_DocScanChn": "9e265c8aa760ebdf5c3bf9e892d55492", + "CCOCR_DocParsing_DocScanEng": "77d04637be3def86dbc2ce37ba64a704", + "CCOCR_DocParsing_TablePhotoChn": "c4dc85252ddad2b43a03a67b1d1ae983", + "CCOCR_DocParsing_TablePhotoEng": "02ab75d6169da0cd2ece9ce0ae14a479", + "CCOCR_DocParsing_TableScanChn": "f1f79959fdd01127df7377c9d46722f2", + "CCOCR_DocParsing_TableScanEng": "794903c7acf52bfe956eefba2166d14b", + "CCOCR_DocParsing_MolecularHandwriting": "30b7f7679b713ce000a939eca7b4078f", + "CCOCR_DocParsing_FormulaHandwriting": "e03047776ce5e79a61ae1c057e2a348e", + "CCOCR_Kie_Sroie2019Word": "3287d99a8e86a99b74171fa5a70f9acb", + "CCOCR_Kie_Cord": "ab297cadcbc7158884a301c366f3330a", + "CCOCR_Kie_EphoieScut": "bb8fa3ba7ea91cbf17be0904956ad3f3", + "CCOCR_Kie_Poie": "882b64317989ecbfed6518051cdffb14", + "CCOCR_Kie_ColdSibr": "109d5dad8b7081fb6a2f088e963196d4", + "CCOCR_Kie_ColdCell": "7b44c45b4d7d768d1dbdc08872fe7d3a", + "CCOCR_MultiLanOcr_Arabic": "e9a3f2bb9298d0b882ebc7a98980c3f3", + "CCOCR_MultiLanOcr_French": "729407ed2036c22e602eff645eddd40c", + "CCOCR_MultiLanOcr_German": "96fc2edae747f0ec95b0a6f9bf723022", + "CCOCR_MultiLanOcr_Italian": "29a508fa5d5a5e767497dd69e2430ebb", + "CCOCR_MultiLanOcr_Japanese": "bbcca96ccf25fff63597c2ab4f3ebb1f", + "CCOCR_MultiLanOcr_Korean": "0f55dbd24eba5edc189c91e124411641", + "CCOCR_MultiLanOcr_Portuguese": "a6fcf8831775a61aa631c0cf1c422ae7", + "CCOCR_MultiLanOcr_Russian": "19d2f84062a1699d3e9333912bd6b303", + "CCOCR_MultiLanOcr_Spanish": "f5a0cfa9f2ae4115c91c7b362034e591", + "CCOCR_MultiLanOcr_Vietnamese": "bf1cd4e83d91767f4906f81550cec8b9", + "CCOCR_MultiSceneOcr_Cord": "92943f0ccb4c5a196c574222e76759a0", + "CCOCR_MultiSceneOcr_Funsd": "229cc38d193edd00f4383610e98ee873", + "CCOCR_MultiSceneOcr_Iam": "d897a6d6c3880c65e752ec11b211204c", + "CCOCR_MultiSceneOcr_ZhDoc": "303682cc16c8bb51b2b896f8ceb8bd38", + "CCOCR_MultiSceneOcr_ZhHandwriting": "faa298d366bc05e5cfb39e334afb8eff", + "CCOCR_MultiSceneOcr_Hieragent": "6f132cdd0473d7cc145c3e3a08957dd6", + "CCOCR_MultiSceneOcr_Ic15": "3d94869f312a41d53d0578a06a2fb1f2", + "CCOCR_MultiSceneOcr_Inversetext": "e141d424a0c4cf9579064428a270f13d", + "CCOCR_MultiSceneOcr_Totaltext": "ca1daf81d49eeb57ef844b72a23c2e62", + "CCOCR_MultiSceneOcr_ZhScene": "9295152a66e6f117db8bfbb20a9013e6", + "CCOCR_MultiSceneOcr_UgcLaion": "8e9ea1fbf9d56532157e807eabf39b21", + "CCOCR_MultiSceneOcr_ZhDense": "de8f48ee0c8a2cf8ed7f2b3a81e6322d", + "CCOCR_MultiSceneOcr_ZhVertical": "4892b4aec6e7fd11e39aaea23712709b" + } + + # It returns a DataFrame + def evaluate(self, eval_file, **judge_kwargs): + """ + """ + df = load(eval_file) + dict_list = df.to_dict(orient='records') + + required_colume_list = ['answer', 'prediction', "category", "image_name", "l2-category", "split"] + for required_colume in required_colume_list: + assert required_colume in df, "required_colume: {} NOT found".format(required_colume) + + gt_info, ptd_info = {}, {} + for data_info in dict_list: + image_name = data_info['image_name'] + gt_info[image_name] = data_info['answer'] + + # warning the FAIL samples + 
if data_info['prediction'] != FAIL_MSG: + ptd_info[image_name] = data_info['prediction'] + + # assert eval_file is a single dataset + group_name = set([str(x) for x in df['category']]).pop() + op_name = set([str(x) for x in df['l2-category']]).pop() + data_name = set([str(x) for x in df['split']]).pop() + + data_info = {"op": op_name, "group": group_name, "dataset": data_name, "num": len(gt_info)} + try: + from .utils.ccocr_evaluator import evaluator_map_info as ccocr_evaluator_map + except ImportError as err: + import warnings + warnings.warn('The dependency of CCOCR evaluator is not properly installed') + warnings.warn(f'{type(err)}: {err}') + eval_func = ccocr_evaluator_map.get(group_name, None) + if eval_func is None: + raise ValueError("error: evaluator not defined for: {}".format(group_name)) + meta_info, eval_info = eval_func(ptd_info, gt_info, **data_info) + + output_info = {"meta": meta_info, "evaluation": eval_info, "config": data_info} + result_file = os.path.splitext(os.path.abspath(eval_file))[0] + "_eval.json" + dump(output_info, result_file) + + # update global status for summary + # warning: the evaluate function should NOT run in parallel + all_status_info = {} + global_status_path = os.path.join(os.path.dirname(eval_file), "status.json") + if os.path.exists(global_status_path): + with open(global_status_path, "r") as f: + all_status_info = json.load(f) + all_status_info[data_name] = output_info + with open(global_status_path, "w") as f: + json.dump(all_status_info, f, ensure_ascii=False, indent=4) + return eval_info.get("summary") diff --git a/vlmeval/dataset/image_mcq.py b/vlmeval/dataset/image_mcq.py new file mode 100644 index 0000000000000000000000000000000000000000..95f08a3b2544789696509310f932c7e78a2efb30 --- /dev/null +++ b/vlmeval/dataset/image_mcq.py @@ -0,0 +1,1185 @@ +import warnings + +from .image_base import ImageBaseDataset +from .utils import build_judge, DEBUG_MESSAGE +from ..smp import * +import pandas as pd + +MMMB_URLS = { + 'MMMB_ar': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmmb/mmmb_ar.tsv', + 'MMMB_cn': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmmb/mmmb_cn.tsv', + 'MMMB_en': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmmb/mmmb_en.tsv', + 'MMMB_pt': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmmb/mmmb_pt.tsv', + 'MMMB_ru': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmmb/mmmb_ru.tsv', + 'MMMB_tr': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmmb/mmmb_tr.tsv', +} + +MTL_MMBench_URLS = { + 'MMBench_dev_ar': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmbench/mmbench_dev_ar.tsv', + 'MMBench_dev_cn': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmbench/mmbench_dev_cn.tsv', + 'MMBench_dev_en': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmbench/mmbench_dev_en.tsv', + 'MMBench_dev_pt': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmbench/mmbench_dev_pt.tsv', + 'MMBench_dev_tr': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmbench/mmbench_dev_tr.tsv', + 'MMBench_dev_ru': 'https://huggingface.co/datasets/AIDC-AI/Parrot-dataset/resolve/main/mmbench/mmbench_dev_ru.tsv', +} + +MMMB_MD5 = { + 'MMMB_ar': 'f3a18b6385f1d9701840aa42de27aead', 'MMMB_cn': '13ed82fa89730037292fcaa27f08f430', + 'MMMB_en': '1cd781a71ec5a2983c090b84105d6a01', 'MMMB_pt': '548ea2b3bb2da991790386f0015d30d1', + 
'MMMB_ru': 'ce1cc8a0533425ab0d86b326ebfc2984', 'MMMB_tr': '0733739d43090327975294292bc5cd67' +} + +MTL_MMBench_MD5 = { + 'MMBench_dev_ar': '4271b4a0d0200e1a86380a878e0d64a4', 'MMBench_dev_cn': '2ed5135326fed02c8e51ea50dda8222f', + 'MMBench_dev_en': 'd9ab776fc018b3d45785e9a5c23431c2', 'MMBench_dev_pt': '4ddfbcd27ef12444b908c03831cd0295', + 'MMBench_dev_tr': '4fab39d501389d3d6cc90264bb708f11', 'MMBench_dev_ru': '5ba1171ff2e68f80637bf78349e402a5' +} + + +class ImageMCQDataset(ImageBaseDataset): + + TYPE = 'MCQ' + + DATASET_URL = { + # MMBench v1.0 + 'MMBench_DEV_EN': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_DEV_EN.tsv', + 'MMBench_TEST_EN': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_TEST_EN.tsv', + 'MMBench_DEV_CN': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_DEV_CN.tsv', + 'MMBench_TEST_CN': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_TEST_CN.tsv', + 'MMBench': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench.tsv', # Internal + 'MMBench_CN': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_CN.tsv', # Internal + # MMBench v1.1 + 'MMBench_DEV_EN_V11': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_DEV_EN_V11.tsv', + 'MMBench_TEST_EN_V11': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_TEST_EN_V11.tsv', + 'MMBench_DEV_CN_V11': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_DEV_CN_V11.tsv', + 'MMBench_TEST_CN_V11': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_TEST_CN_V11.tsv', + 'MMBench_V11': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_V11.tsv', # Internal + 'MMBench_CN_V11': 'https://opencompass.openxlab.space/utils/benchmarks/MMBench/MMBench_CN_V11.tsv', # Internal + # SEEDBench Series + 'SEEDBench_IMG': 'https://opencompass.openxlab.space/utils/benchmarks/SEEDBench/SEEDBench_IMG.tsv', + 'SEEDBench2': 'https://huggingface.co/datasets/VLMEval/SEEDBench2/resolve/main/SEEDBench2.tsv', + 'SEEDBench2_Plus': 'https://opencompass.openxlab.space/utils/benchmarks/SEEDBench/SEEDBench2_Plus.tsv', + # ScienceQA Series + 'ScienceQA_VAL': 'https://opencompass.openxlab.space/utils/benchmarks/ScienceQA/ScienceQA_VAL.tsv', + 'ScienceQA_TEST': 'https://opencompass.openxlab.space/utils/benchmarks/ScienceQA/ScienceQA_TEST.tsv', + # MMT-Bench + 'MMT-Bench_ALL_MI': 'https://opencompass.openxlab.space/utils/benchmarks/MMT-Bench/MMT-Bench_ALL_MI.tsv', + 'MMT-Bench_ALL': 'https://opencompass.openxlab.space/utils/benchmarks/MMT-Bench/MMT-Bench_ALL.tsv', + 'MMT-Bench_VAL_MI': 'https://opencompass.openxlab.space/utils/benchmarks/MMT-Bench/MMT-Bench_VAL_MI.tsv', + 'MMT-Bench_VAL': 'https://opencompass.openxlab.space/utils/benchmarks/MMT-Bench/MMT-Bench_VAL.tsv', + # AesBench + 'AesBench_VAL': 'https://huggingface.co/datasets/VLMEval/AesBench/resolve/main/AesBench_VAL.tsv', + 'AesBench_TEST': 'https://huggingface.co/datasets/VLMEval/AesBench/resolve/main/AesBench_TEST.tsv', + # Q-Bench1 + 'Q-Bench1_VAL': 'https://huggingface.co/datasets/zhangzicheng/qbench_tsv/resolve/main/Q-Bench1_VAL.tsv', + 'Q-Bench1_TEST': 'https://huggingface.co/datasets/zhangzicheng/qbench_tsv/resolve/main/Q-Bench1_TEST.tsv', + # A-Bench + 'A-Bench_VAL': 'https://huggingface.co/datasets/zhangzicheng/abench_tsv/resolve/main/A-bench_VAL.tsv', + 'A-Bench_TEST': 'https://huggingface.co/datasets/zhangzicheng/abench_tsv/resolve/main/A-bench_TEST.tsv', + # R-Bench + 'R-Bench-Dis': 
'https://huggingface.co/datasets/lcysyzxdxc/R-Bench/blob/main/R-bench-dis.tsv', + 'R-Bench-Ref': 'https://huggingface.co/datasets/lcysyzxdxc/R-Bench/blob/main/R-bench-ref.tsv', + # Other Benchmarks + 'CCBench': 'https://opencompass.openxlab.space/utils/VLMEval/CCBench.tsv', + 'AI2D_TEST': 'https://opencompass.openxlab.space/utils/VLMEval/AI2D_TEST.tsv', + 'AI2D_TEST_NO_MASK': 'https://opencompass.openxlab.space/utils/VLMEval/AI2D_TEST_NO_MASK.tsv', + 'MMStar': 'https://opencompass.openxlab.space/utils/VLMEval/MMStar.tsv', + 'RealWorldQA': 'https://opencompass.openxlab.space/utils/VLMEval/RealWorldQA.tsv', + 'MLLMGuard_DS': 'https://opencompass.openxlab.space/utils/VLMEval/MLLMGuard_DS.tsv', + 'BLINK': 'https://opencompass.openxlab.space/utils/VLMEval/BLINK.tsv', + 'TaskMeAnything_v1_imageqa_random': ( + 'https://huggingface.co/datasets/weikaih/TaskMeAnything-v1-imageqa-random/' + 'resolve/main/TaskMeAnything-v1-imageqa-random.tsv' + ), + 'A-OKVQA': 'https://huggingface.co/datasets/Allen8/A-OKVQA/resolve/main/a-okvqa.tsv', + 'WorldMedQA-V': 'https://opencompass.openxlab.space/utils/VLMEval/WorldMedQA-V.tsv', + 'VisOnlyQA-VLMEvalKit': ( + 'https://huggingface.co/datasets/ryokamoi/VisOnlyQA_Eval_Real/' + 'resolve/main/visonlyqa_vlmevalkit.tsv' + ), + '3DSRBench': ( + 'https://huggingface.co/datasets/ccvl/3DSRBench/' + 'resolve/main/3dsrbench_v1_vlmevalkit_circular.tsv' + ), + # For Internal Use Only + 'MMBench_V11_MINI': 'https://opencompass.openxlab.space/utils/TEST/MMBench_V11_MINI.tsv', + 'MMStar_MINI': 'https://opencompass.openxlab.space/utils/TEST/MMStar_MINI.tsv', + 'AI2D_MINI': 'https://opencompass.openxlab.space/utils/TEST/AI2D_MINI.tsv', + } + + DATASET_MD5 = { + # MMBench v1.0 + 'MMBench_DEV_EN': 'b6caf1133a01c6bb705cf753bb527ed8', + 'MMBench_TEST_EN': '6939fadb0ce626fefc0bdc9c64efc528', + 'MMBench_DEV_CN': '08b8fc3324a5ed74155350f57be69fbd', + 'MMBench_TEST_CN': '7e1239baf0ee4c8b513e19705a0f317e', + 'MMBench': '4115aea3383f3dd0083be6a633e0f820', # Internal Only + 'MMBench_CN': '2e053ffc90ea598b1feae13c36dc13ee', # Internal Only + # MMBench v1.1 + 'MMBench_DEV_EN_V11': '30c05be8f2f347a50be25aa067248184', + 'MMBench_TEST_EN_V11': '26f0f15381a21720255091d3e0316ce6', + 'MMBench_DEV_CN_V11': '593f9b5f6bea453d870a798b34ae4f37', + 'MMBench_TEST_CN_V11': '74bbe4556dac745613c7cbe5ad787050', + 'MMBench_V11': 'b9276414f57af1308dcc4d0cd9b42e7c', # Internal Only + 'MMBench_CN_V11': '95f6980dd1b4de38e3cbffe0305a3f25', # Internal Only + # SEEDBench + 'SEEDBench_IMG': '68017231464752261a2526d6ca3a10c0', + 'SEEDBench2': '4ec15cf864c4f16274112284f531813e', + 'SEEDBench2_Plus': '7cb2323950d71f049df70e5162062af3', + # ScienceQA + 'ScienceQA_VAL': '96320d05e142e585e7204e72affd29f3', + 'ScienceQA_TEST': 'e42e9e00f9c59a80d8a5db35bc32b71f', + # MMT-Bench + 'MMT-Bench_ALL_MI': '5272157097e19cdd7cb41e412ab3b7c7', + 'MMT-Bench_ALL': 'b273a2f4c596fe4f2605de0494cd632f', + 'MMT-Bench_VAL_MI': 'c7d7b998eb5cd9aa36c7d4f721472462', + 'MMT-Bench_VAL': '8dd4b730f53dbf9c3aed90ca31c928e0', + # AesBench + 'AesBench_VAL': '3edb0c319e9187aa0b97fe7a11700a8c', + 'AesBench_TEST': '58b1f7ba2cc32e1d68896d6ee716bbf8', + # Q-Bench1 + 'Q-Bench1_VAL': '837bdb6cd2da571713543462815187b7', + 'Q-Bench1_TEST': '15e759bfd58c9d5f30b23a317d347153', + # A-Bench + 'A-Bench_VAL': '218563ec50d34bb336c814143a5bb9c1', + 'A-Bench_TEST': '567013fb033a20cf23f51d8e865bd16c', + # R-Bench + 'R-Bench-Dis': 'd6e961dbfc43350688af2560226830b4', + 'R-Bench-Ref': '270c1cb555acb523f3fdb178ed57021d', + # Other Benchmarks + 'CCBench': 
'f5dde47f24dc5a6fb6e595b409b466ac', + 'AI2D_TEST': '0f593e0d1c7df9a3d69bf1f947e71975', + 'AI2D_TEST_NO_MASK': 'fd8f463634d4fe9fbd23b876e8eea5be', + 'MMStar': 'e1ecd2140806c1b1bbf54b43372efb9e', + 'RealWorldQA': '4de008f55dc4fd008ca9e15321dc44b7', + 'MLLMGuard_DS': '975fc0dd7119386e198c37d71e274b3f', + 'BLINK': '3b6649b6a662184ea046908e5506260e', + 'TaskMeAnything_v1_imageqa_random': '023fef69e2ca21827afb77c5ec3bc889', + 'WorldMedQA-V': '441e63875e30c87f5750528b57b41285', + "VisOnlyQA-VLMEvalKit": 'cf460a31d2acb8d3a7cecd0e69298bfa', + '3DSRBench': '13a99f33164dc1b9faf0e8b8b01fd6f2', + } + + DATASET_URL.update(MMMB_URLS) + DATASET_URL.update(MTL_MMBench_URLS) + DATASET_MD5.update(MMMB_MD5) + DATASET_MD5.update(MTL_MMBench_MD5) + + def build_prompt(self, line): + + if isinstance(line, int): + line = self.data.iloc[line] + + if self.meta_only: + tgt_path = toliststr(line['image_path']) + else: + tgt_path = self.dump_image(line) + + question = line['question'] + options = { + cand: line[cand] + for cand in string.ascii_uppercase + if cand in line and not pd.isna(line[cand]) + } + options_prompt = 'Options:\n' + for key, item in options.items(): + options_prompt += f'{key}. {item}\n' + hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None + prompt = '' + if hint is not None: + prompt += f'Hint: {hint}\n' + prompt += f'Question: {question}\n' + if len(options): + prompt += options_prompt + prompt += 'Please select the correct answer from the options above. \n' + + msgs = [] + if isinstance(tgt_path, list): + msgs.extend([dict(type='image', value=p) for p in tgt_path]) + else: + msgs = [dict(type='image', value=tgt_path)] + msgs.append(dict(type='text', value=prompt)) + + return msgs + + def evaluate(self, eval_file, **judge_kwargs): + from .utils.multiple_choice import report_acc, report_acc_MMT, mcq_circular_eval, mcq_vanilla_eval + # assert dataset is not None + dataset_map = { + 'MMBench_TEST_EN': 'MMBench', 'MMBench_TEST_EN_V11': 'MMBench_V11', + 'MMBench_TEST_CN': 'MMBench_CN', 'MMBench_TEST_CN_V11': 'MMBench_CN_V11' + } + dataset = self.dataset_name + if dataset in dataset_map: + dataset = dataset_map[dataset] + nproc = judge_kwargs.pop('nproc', 4) + + circular = False + if listinstr(['mmbench', 'ccbench', 'circular'], dataset.lower()): + data = load(eval_file) + data['index'] = [int(x) for x in data['index']] + dump(data, eval_file) + circular = True + + suffix = eval_file.split('.')[-1] + model = judge_kwargs.get('model', 'exact_matching') + assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125'] + name_str_map = {'chatgpt-0125': 'openai', 'gpt-4-0125': 'gpt4'} + name_str = name_str_map[model] if model in name_str_map else model + + if model == 'exact_matching': + model = None + elif gpt_key_set(): + model = build_judge(**judge_kwargs) + if not model.working(): + warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation') + warnings.warn(DEBUG_MESSAGE) + model = None + else: + warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation') + model = None + + result_file = eval_file.replace(f'.{suffix}', f'_{name_str}_result.pkl') + + data = load(eval_file) + data = data.sort_values(by='index') + data['prediction'] = [str(x) for x in data['prediction']] + # If not choice label, then use lower case + for k in data.keys(): + data[k.lower() if k not in list(string.ascii_uppercase) else k] = data.pop(k) + + meta = self.data + meta_q_map = {x: y for x, y in zip(meta['index'], meta['question'])} + 
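+        # Descriptive note (added): the loop below is a sanity check that every index in the prediction file
+        # also appears in the dataset meta, i.e. eval_file is the same as or a subset of the dataset.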
data_map = {x: y for x, y in zip(data['index'], data['question'])} + for k in data_map: + assert k in meta_q_map, ( + f'eval_file should be the same as or a subset of dataset {self.dataset_name}' + ) + + if circular: + data = mcq_circular_eval(model, data, meta, nproc, result_file, self.dataset_name) + else: + data = mcq_vanilla_eval(model, data, meta, nproc, result_file, self.dataset_name) + + # load split + dump(data, eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}')) + data = load(eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}')) + + # May have different report acc functions for different datasets + if 'MMT' in dataset: + acc = report_acc_MMT(data) + else: + acc = report_acc(data) + + score_file = eval_file.replace(f'.{suffix}', '_acc.csv') + dump(acc, score_file) + + if dataset == 'AesBench_VAL': + warnings.warn('Note that AesBench VAL is just a toy version of AesBench TEST. For full results, \ + please evaluate on AesBench TEST. The AesBench TEST dataset is more than 20 times \ + larger than the VAL dataset and the leaderboard results are based on AesBench TEST.') + if dataset == 'VisOnlyQA-VLMEvalKit': + warnings.warn('Note that the results on VisOnlyQA-VLMEvalKit are different from the results on \ + the original VisOnlyQA. VisOnlyQA-VLMEvalKit does not include the \ + chemistry__shape_multi split and uses a different evaluation prompt. Please \ + explicitly specify the version of the dataset when you report results.') + + return acc + + +class MMMUDataset(ImageMCQDataset): + + DATASET_URL = { + 'MMMU_DEV_VAL': 'https://opencompass.openxlab.space/utils/VLMEval/MMMU_DEV_VAL.tsv', + 'MMMU_TEST': 'https://opencompass.openxlab.space/utils/VLMEval/MMMU_TEST.tsv', + } + + DATASET_MD5 = { + 'MMMU_DEV_VAL': '585e8ad75e73f75dcad265dfd0417d64', + 'MMMU_TEST': 'c19875d11a2d348d07e5eb4bdf33166d', + } + + @staticmethod + def split_MMMU(msgs): + text, images = None, [] + for s in msgs: + if s['type'] == 'image': + images.append(s['value']) + elif s['type'] == 'text': + assert text is None + text = s['value'] + text_segs = text.split('' + image_idx = int(seg[0]) - 1 + segs.append(dict(type='image', value=images[image_idx])) + segs.append(dict(type='text', value=seg[2:])) + return segs + + def build_prompt(self, line): + msgs = super().build_prompt(line) + msgs = self.split_MMMU(msgs) + return msgs + + +class MMMUProDataset(MMMUDataset): + + TYPE = 'MCQ_MMMU_Pro' + + def __init__(self, **kwargs): + super().__init__(**kwargs) + if 'MMMU_Pro_V' in self.dataset_name: + self.data['question'] = ['placeholder'] * len(self.data) + + DATASET_URL = { + 'MMMU_Pro_10c': 'https://opencompass.openxlab.space/utils/VLMEval/MMMU_Pro_10c.tsv', + 'MMMU_Pro_10c_COT': 'https://opencompass.openxlab.space/utils/VLMEval/MMMU_Pro_10c.tsv', + 'MMMU_Pro_V': 'https://opencompass.openxlab.space/utils/VLMEval/MMMU_Pro_V.tsv', + 'MMMU_Pro_V_COT': 'https://opencompass.openxlab.space/utils/VLMEval/MMMU_Pro_V.tsv', + } + + DATASET_MD5 = { + 'MMMU_Pro_10c': '22cee868fe6b680d14b99bfff6db8172', + 'MMMU_Pro_10c_COT': '22cee868fe6b680d14b99bfff6db8172', + 'MMMU_Pro_V': 'd01441a87b3dbe721b5a04652ae38009', + 'MMMU_Pro_V_COT': 'd01441a87b3dbe721b5a04652ae38009', + } + + def build_prompt(self, line): + if isinstance(line, int): + line = self.data.iloc[line] + + if self.meta_only: + tgt_path = toliststr(line['image_path']) + else: + tgt_path = self.dump_image(line) + + if 'MMMU_Pro_V' in self.dataset_name: + question = 'Answer the following multiple-choice question in the image. 
' + if 'COT' in self.dataset_name: + question += ( + "The last line of your response should be of the following format: 'Answer: $LETTER' " + "(without quotes) where LETTER is one of the options. Think step by step before answering. " + ) + else: + question += "Answer directly with the option letter from the given choices. " + if isinstance(tgt_path, list): + assert len(tgt_path) == 1 + tgt_path = tgt_path[0] + return [dict(type='image', value=tgt_path), dict(type='text', value=question)] + else: + question = line['question'] + options = { + cand: line[cand] + for cand in string.ascii_uppercase + if cand in line and not pd.isna(line[cand]) + } + options_prompt = 'Options:\n' + for key, item in options.items(): + options_prompt += f'{key}. {item}\n' + prompt = '' + prompt += f'Question: {question}\n' + if len(options): + prompt += options_prompt + if 'COT' in self.dataset_name: + prompt += ( + "Answer the following multiple-choice question. The last line of your response should be of " + "the following format: 'Answer: $LETTER' (without quotes) where LETTER is one of the options. " + "Think step by step before answering. " + ) + else: + prompt += "Answer directly with the option letter from the given choices. " + + msgs = [] + if isinstance(tgt_path, list): + msgs.extend([dict(type='image', value=p) for p in tgt_path]) + else: + msgs = [dict(type='image', value=tgt_path)] + msgs.append(dict(type='text', value=prompt)) + msgs = self.split_MMMU(msgs) + return msgs + + def cot_postproc(self, response): + lines = response.strip().split('\n') + lines = [x.strip() for x in lines] + cands = [x for x in lines if x.startswith('Answer:')] + if len(cands) == 1: + counter = defaultdict(lambda: 0) + for ch in cands[0]: + if ch in string.ascii_uppercase: + counter[ch] += 1 + if len(counter) == 1: + return list(counter.keys())[0] + else: + return cands[0][7:] + return response + + def evaluate(self, eval_file, **judge_kwargs): + if 'COT' in self.dataset_name: + data = load(eval_file) + data['prediction'] = [self.cot_postproc(x) for x in data['prediction']] + tgt = eval_file.replace('.xlsx', '_cotpost.xlsx') + dump(data, tgt) + res = super().evaluate(tgt, **judge_kwargs) + acc_org = eval_file.replace('.xlsx', '_acc.csv') + acc_now = eval_file.replace('.xlsx', '_cotpost_acc.csv') + shutil.copy(acc_now, acc_org) + return res + else: + return super().evaluate(eval_file, **judge_kwargs) + + +class MUIRDataset(ImageMCQDataset): + + DATASET_URL = { + 'MUIRBench': 'http://opencompass.openxxlab.com/utils/VLMEval/MUIRBench.tsv' + } + + DATASET_MD5 = { + 'MUIRBench': '2e5e6fd7699761b08a7cb3ab8c0c2ec8' + } + + @staticmethod + def split_MUIR(msgs): + text, images = None, [] + + # Separate images and text from msgs + for s in msgs: + if s['type'] == 'image': + images.append(s['value']) + elif s['type'] == 'text': + assert text is None # Ensure only one text entry is expected + text = s['value'] + + # Split text by tags + text_segs = text.split('') + + # Initialize the segments list + segs = [] + + # Iterate through the text segments and images + for i, seg in enumerate(text_segs): + # Append the image if this is not the first segment and there are still images left + if i > 0 and i - 1 < len(images): + segs.append(dict(type='image', value=images[i - 1])) + # Append the text segment (if it's non-empty) + if len(seg) > 0: + segs.append(dict(type='text', value=seg)) + + return segs + + def build_prompt(self, line): + + if isinstance(line, int): + line = self.data.iloc[line] + + if self.meta_only: + tgt_path = 
toliststr(line['image_path']) + else: + tgt_path = self.dump_image(line) + + question = line['question'] + options = { + cand: line[cand] + for cand in string.ascii_uppercase + if cand in line and not pd.isna(line[cand]) + } + # options_prompt = '' + options_prompt = '\n'.join([f'{key}. {item}' for key, item in options.items()]) + # for key, item in options.items(): + # options_prompt += f'{key}. {item}\n' + + prompt = '' + + prompt += f'{question}\n' + if len(options): + prompt += options_prompt + prompt += "\nAnswer with the option's letter from the given choices directly." + + msgs = [] + if isinstance(tgt_path, list): + msgs.extend([dict(type='image', value=p) for p in tgt_path]) + else: + msgs = [dict(type='image', value=tgt_path)] + msgs.append(dict(type='text', value=prompt)) + + msgs = self.split_MUIR(msgs) + return msgs + + +class GMAIMMBenchDataset(ImageMCQDataset): + + DATASET_URL = { + 'GMAI-MMBench_VAL': 'https://huggingface.co/datasets/VLMEval/GMAI-MMBench/resolve/main/GMAI-MMBench_VAL.tsv', + 'GMAI_mm_bench_TEST_part_1': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_1.tsv', # noqa: E501 + 'GMAI_mm_bench_TEST_part_2': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_2.tsv', # noqa: E501 + 'GMAI_mm_bench_TEST_part_3': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_3.tsv', # noqa: E501 + 'GMAI_mm_bench_TEST_part_4': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_4.tsv', # noqa: E501 + 'GMAI_mm_bench_TEST_part_5': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_5.tsv', # noqa: E501 + 'GMAI_mm_bench_TEST_part_6': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_6.tsv', # noqa: E501 + 'GMAI_mm_bench_TEST_part_7': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_7.tsv', # noqa: E501 + 'GMAI_mm_bench_TEST_part_8': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_8.tsv', # noqa: E501 + 'GMAI_mm_bench_TEST_part_9': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_9.tsv', # noqa: E501 + 'GMAI_mm_bench_TEST_part_10': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_10.tsv', # noqa: E501 + 'GMAI_mm_bench_TEST_part_11': 'https://huggingface.co/datasets/OpenGVLab/GMAI-MMBench/resolve/main/GMAI_mm_bench_TEST_part_11.tsv', # noqa: E501 + } + + DATASET_MD5 = { + 'GMAI-MMBench_VAL': '254bd581627866f1c499d3d6b4422324', + 'GMAI_mm_bench_TEST_part_1': '900d735231230a63f4ed45665c078ef4', + 'GMAI_mm_bench_TEST_part_2': '1b27ab621386945d7e4a765ad2d22b0e', + 'GMAI_mm_bench_TEST_part_3': '44bdc2b6267dd505d529b8cad06f0fb2', + 'GMAI_mm_bench_TEST_part_4': '5a04a04fcac9f1466709f242fdb80acb', + 'GMAI_mm_bench_TEST_part_5': 'c70baf8909eda9af0ddeab275c721336', + 'GMAI_mm_bench_TEST_part_6': '825abc39596b644dead9350d0cfa3b96', + 'GMAI_mm_bench_TEST_part_7': 'defb8aed2fb77365a76b6b9abd6a2701', + 'GMAI_mm_bench_TEST_part_8': 'ff490d60b85f2bb0abb67a435b298c65', + 'GMAI_mm_bench_TEST_part_9': 'ff67c86f40da93b09139ac1d1ba5dc6b', + 'GMAI_mm_bench_TEST_part_10': '3dae94627b9ac0fe00180d4780fbf6dc', + 'GMAI_mm_bench_TEST_part_11': 'd08dc813f0eb6bbab63cae2a9d113c4b', + } + + @classmethod + def supported_datasets(cls): + return ['GMAI-MMBench_VAL', 'GMAI-MMBench_TEST'] + + def 
load_data(self, dataset): + if dataset == 'GMAI-MMBench_VAL': + data_path = osp.join(LMUDataRoot(), f'{dataset}.tsv') + if file_size(data_path, 'GB') > 1: + local_path = data_path.replace('.tsv', '_local.tsv') + if not osp.exists(local_path) or os.environ.get('FORCE_LOCAL'): + from ..tools import LOCALIZE + LOCALIZE(data_path, local_path) + data_path = local_path + return load(data_path) + elif dataset == 'GMAI-MMBench_TEST': + dfs = [] + for part_num in range(1, 12): + part_name = f'GMAI_mm_bench_TEST_part_{part_num}' + url = self.DATASET_URL[part_name] + file_md5 = self.DATASET_MD5.get(part_name) + tsv_path = osp.join(LMUDataRoot(), f'{part_name}.tsv') + if not osp.exists(tsv_path) or (file_md5 and md5(tsv_path) != file_md5): + download_file(url, filename=tsv_path) + local_path = tsv_path.replace('.tsv', '_local.tsv') + if not osp.exists(local_path) or os.environ.get('FORCE_LOCAL'): + from ..tools import LOCALIZE + LOCALIZE(tsv_path, local_path) + tsv_path = local_path + # 加载数据 + df = load(tsv_path) + dfs.append(df) + # 合并所有数据 + data = pd.concat(dfs, ignore_index=True) + return data + else: + raise ValueError(f"未知的数据集:{dataset}") + + def report_acc_by_groups(self, df, group_column): + res = defaultdict(list) + + # Check for the 'split' column + if 'split' in df: + splits = list(set(df['split'])) + res['split'] = splits + else: + df['split'] = ['none'] * len(df) + res['split'] = ['none'] + + res['Overall'] = [np.mean(df[df['split'] == sp]['hit']) for sp in res['split']] + + if group_column not in df: + raise ValueError(f"Column '{group_column}' not found in dataframe.") # noqa: E713 + + abilities = list(set(df[group_column])) + abilities = ['None' if isinstance(ab, float) and pd.isna(ab) else ab for ab in abilities] + abilities.sort() + + for ab in abilities: + ab_name = ab + sub_df = df[df[group_column] == ab] + res[ab_name] = [np.mean(sub_df[sub_df['split'] == sp]['hit']) for sp in res['split']] + + return pd.DataFrame(res) + + def evaluate(self, eval_file, **judge_kwargs): + from .utils.multiple_choice import report_acc, mcq_vanilla_eval + nproc = judge_kwargs.pop('nproc', 4) + + suffix = eval_file.split('.')[-1] + model = judge_kwargs.get('model', 'exact_matching') + assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125'] + name_str_map = {'chatgpt-0125': 'openai', 'gpt-4-0125': 'gpt4'} + name_str = name_str_map[model] if model in name_str_map else model + + if model == 'exact_matching': + model = None + elif gpt_key_set(): + model = build_judge(**judge_kwargs) + if not model.working(): + warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation') + warnings.warn(DEBUG_MESSAGE) + model = None + else: + warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation') + model = None + + result_file = eval_file.replace(f'.{suffix}', f'_{name_str}_result.pkl') + + data = load(eval_file) + data = data.sort_values(by='index') + data['prediction'] = [str(x) for x in data['prediction']] + # If not choice label, then use lower case + for k in data.keys(): + data[k.lower() if k not in list(string.ascii_uppercase) else k] = data.pop(k) + + meta = self.data + meta_q_map = {x: y for x, y in zip(meta['index'], meta['question'])} + data_map = {x: y for x, y in zip(data['index'], data['question'])} + for k in data_map: + assert k in meta_q_map, ( + f'eval_file should be the same as or a subset of dataset {self.dataset_name}' + ) + + data = mcq_vanilla_eval(model, data, meta, nproc, result_file, self.dataset_name) + + # load split + 
dump(data, eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}')) + data = load(eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}')) + + acc = report_acc(data) + + for group_col in ['clinical vqa task', 'department', 'perceptual granularity']: + acc_grouped = self.report_acc_by_groups(data, group_col) + score_file_grouped = eval_file.replace(f'.{suffix}', f'_{group_col}_acc.csv') + dump(acc_grouped, score_file_grouped) + + return acc + + +class MMERealWorld(ImageMCQDataset): + + TYPE = 'MMERealWorld' + + DATASET_MD5 = { + 'MME-RealWorld': '271c33ec814c39533c467ec6fb8a6f36', + 'MME-RealWorld-Lite': '4c17057d7d3b6c4a0d4397c3dae0881c', + 'MME-RealWorld-CN': 'daaa763d52a760a38606d5dedb3fe444', + } + SYS = { + 'MME-RealWorld': ( + 'Select the best answer to the above multiple-choice question based on the image. ' + 'Respond with only the letter (A, B, C, D, or E) of the correct option. \n' + 'The best answer is:' + ), + 'MME-RealWorld-Lite': ( + 'Select the best answer to the above multiple-choice question based on the image. ' + 'Respond with only the letter (A, B, C, D, or E) of the correct option. \n' + 'The best answer is:' + ), + 'MME-RealWorld-CN': ( + '根据图像选择上述多项选择题的最佳答案。只需回答正确选项的字母(A, B, C, D 或 E)。\n' + '最佳答案为:' + ), + } + + @classmethod + def supported_datasets(cls): + return ['MME-RealWorld', 'MME-RealWorld-CN', 'MME-RealWorld-Lite',] + + def load_data( + self, dataset="MME-RealWorld", repo_id="yifanzhang114/MME-RealWorld-Base64" + ): + + def check_integrity(pth): + data_file = osp.join(pth, f"{dataset}.tsv") + + if not os.path.exists(data_file): + return False + + if md5(data_file) != self.DATASET_MD5[dataset]: + return False + return True + + def generate_tsv(pth): + tsv_file = os.path.join(pth, f"{dataset}.tsv") + + if os.path.exists(tsv_file): + print(f"{tsv_file} already exists.") + return + + json_dir = os.path.join(pth, dataset) + json_files = [f for f in os.listdir(json_dir) if f.endswith(".json")] + + data_list = [] + for json_file in json_files: + with open(os.path.join(json_dir, json_file), "r") as f: + data = json.load(f) + for item in tqdm(data): + choice_prompt = ( + "The choices are listed below:\n" + if dataset in ["MME-RealWorld", "MME-RealWorld-Lite"] + else "选项如下所示:\n" + ) + data_list.append( + { + "index": item["index"], + "image": item["image"], + "question": item["question"], + "multi-choice options": choice_prompt + + "\n".join(item["multi-choice options"]), + "A": item["multi-choice options"][0][4:], + "B": item["multi-choice options"][1][4:], + "C": item["multi-choice options"][2][4:], + "D": item["multi-choice options"][3][4:], + "E": item["multi-choice options"][4][4:], + "answer": item["answer"], + "category": item["category"], + "l2-category": item["l2-category"], + } + ) + df = pd.DataFrame(data_list) + df.to_csv(tsv_file, sep="\t", index=False) + print(f"TSV file saved to {tsv_file}") + + # Check if dataset is cached and has integrity + if dataset == "MME-RealWorld-Lite": + url = 'https://huggingface.co/datasets/yifanzhang114/MME-RealWorld-Base64/resolve/main/mme_realworld_lite.tsv' # noqa: E501 + file_md5 = ( + self.DATASET_MD5[dataset] if dataset in self.DATASET_MD5 else None + ) + datas = self.prepare_tsv(url, file_md5) + choice_prompt = "The choices are listed below:\n" + for index, item in datas.iterrows(): + options = eval(item["multi-choice options"]) + datas.loc[index, "multi-choice options"] = choice_prompt + "\n".join( + options + ) + datas.loc[index, "A"] = options[0][4:] + datas.loc[index, "B"] = options[1][4:] + 
datas.loc[index, "C"] = options[2][4:] + datas.loc[index, "D"] = options[3][4:] + datas.loc[index, "E"] = options[4][4:] + return datas + + update_flag = False + cache_path = get_cache_path(repo_id) + if cache_path is not None and check_integrity(cache_path): + dataset_path = cache_path + print(f"Using cached dataset from {cache_path}") + else: + from huggingface_hub import snapshot_download + + # Download or find the dataset path + dataset_path = snapshot_download(repo_id=repo_id, repo_type="dataset") + generate_tsv(dataset_path) + update_flag = True + + data_path = os.path.join(dataset_path, f"{dataset}.tsv") + if file_size(data_path, "GB") > 1: + local_path = data_path.replace(".tsv", "_local.tsv") + if ( + not osp.exists(local_path) + or os.environ.get("FORCE_LOCAL", None) + or update_flag + ): + from vlmeval.tools import LOCALIZE + + LOCALIZE(data_path, local_path) + data_path = local_path + return load(data_path) + + def post_build(self, dataset): + self.TYPE = 'MMERealWorld' + + # Given one data record, return the built prompt (a multi-modal message), can override + def build_prompt(self, line): + if isinstance(line, int): + line = self.data.iloc[line] + + if self.meta_only: + tgt_path = toliststr(line['image_path']) + else: + tgt_path = self.dump_image(line) + + question = line['question'] + + choice_prompt = line['multi-choice options'] + '\n' + question += ' ' + choice_prompt + self.SYS[self.dataset_name] + + msgs = [] + if isinstance(tgt_path, list): + msgs.extend([dict(type='image', value=p) for p in tgt_path]) + else: + msgs = [dict(type='image', value=tgt_path)] + msgs.append(dict(type='text', value=question)) + return msgs + + # It returns a dictionary + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + from .utils.multiple_choice import extract_characters_regex, get_dimension_rating + assert eval_file.endswith('.xlsx'), 'data file should be an xlsx file' + FAIL_MSG = 'Failed to obtain answer via API.' + tmp_file = eval_file.replace('.xlsx', '_tmp.pkl') + tgt_file = eval_file.replace('.xlsx', '_rating.json') + score_file = eval_file.replace('.xlsx', '_score.xlsx') + + if not osp.exists(score_file): + + res = {} if not osp.exists(tmp_file) else load(tmp_file) + res = {k: v for k, v in res.items() if FAIL_MSG not in v} + + data = load(eval_file) + cnt_rejected = 0 + data_un = data[~pd.isna(data['prediction'])] + + for idx in data['index']: + ans = data.loc[data['index'] == idx, 'answer'].values[0] + pred = data.loc[data['index'] == idx, 'prediction'].values[0] + + extract_pred = extract_characters_regex(pred) + if extract_pred == '': + cnt_rejected += 1 + data.loc[data['index'] == idx, 'score'] = 0 + else: + data.loc[data['index'] == idx, 'score'] = int(extract_pred == ans) + + print( + f'Among {len(data)} questions, failed to obtain prediction for {len(data) - len(data_un)} questions, ' + f'failed to obtain the score for another {cnt_rejected} questions. ' + f'Those questions will be counted as 0 score in ALL rating.' 
+            )
+
+            dump(data, score_file)
+
+        rating = get_dimension_rating(score_file)
+        dump(rating, tgt_file)
+        return rating
+
+
+class HRBenchDataset(ImageMCQDataset):
+
+    DATASET_URL = {
+        'HRBench4K': 'https://huggingface.co/datasets/DreamMr/HR-Bench/resolve/main/hr_bench_4k.tsv',
+        'HRBench8K': 'https://huggingface.co/datasets/DreamMr/HR-Bench/resolve/main/hr_bench_8k.tsv',
+    }
+
+    DATASET_MD5 = {
+        'HRBench4K': 'f6b041b03d49543494b8a56d2e35be65',
+        'HRBench8K': '274c9c7f89329b804a4723178a00219c',
+    }
+
+    def evaluate(self, eval_file, **judge_kwargs):
+        assert os.path.exists(eval_file), '{} does not exist!'.format(eval_file)
+        from .utils.multiple_choice import mcq_vanilla_eval
+        from .utils.hrbench import report_acc_hrbench
+        nproc = judge_kwargs.pop('nproc', 4)
+
+        suffix = eval_file.split('.')[-1]
+        model = judge_kwargs.get('model', 'exact_matching')
+        assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125']
+        name_str_map = {'chatgpt-0125': 'openai', 'gpt-4-0125': 'gpt4'}
+        name_str = name_str_map[model] if model in name_str_map else model
+
+        if model == 'exact_matching':
+            model = None
+        elif gpt_key_set():
+            model = build_judge(**judge_kwargs)
+            if not model.working():
+                warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation')
+                warnings.warn(DEBUG_MESSAGE)
+                model = None
+        else:
+            warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation')
+            model = None
+
+        result_file = eval_file.replace(f'.{suffix}', f'_{name_str}_result.pkl')
+
+        data = load(eval_file)
+        data = data.sort_values(by='index')
+        data['prediction'] = [str(x) for x in data['prediction']]
+        # If not choice label, then use lower case
+        for k in data.keys():
+            data[k.lower() if k not in list(string.ascii_uppercase) else k] = data.pop(k)
+
+        meta = self.data
+        meta_q_map = {x: y for x, y in zip(meta['index'], meta['question'])}
+        data_map = {x: y for x, y in zip(data['index'], data['question'])}
+        for k in data_map:
+            assert k in meta_q_map, (
+                f'eval_file should be the same as or a subset of dataset {self.dataset_name}'
+            )
+
+        score_file = eval_file.replace(f'.{suffix}', '_acc.csv')
+
+        if osp.exists(score_file):
+            acc = load(score_file)
+            return acc
+        data = mcq_vanilla_eval(model, data, meta, nproc, result_file, self.dataset_name)
+        dump(data, eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}'))
+        data = load(eval_file.replace(f'.{suffix}', f'_{name_str}_result.{suffix}'))
+
+        acc = report_acc_hrbench(data)
+
+        score_file = eval_file.replace(f'.{suffix}', '_acc.csv')
+        dump(acc, score_file)
+
+        return acc
+
+
+class CustomMCQDataset(ImageMCQDataset):
+
+    def load_data(self, dataset):
+        data_path = osp.join(LMUDataRoot(), f'{dataset}.tsv')
+
+        if file_size(data_path, 'GB') > 1:
+            local_path = data_path.replace('.tsv', '_local.tsv')
+            if not osp.exists(local_path) or os.environ.get('FORCE_LOCAL', None):
+                from ..tools import LOCALIZE
+                LOCALIZE(data_path, local_path)
+            data_path = local_path
+        return load(data_path)
+
+
+class NaturalBenchDataset(ImageMCQDataset):
+
+    DATASET_URL = {
+        'NaturalBenchDataset': (
+            'https://huggingface.co/datasets/BaiqiL/'
+            'NaturalBench/resolve/main/NaturalBenchDataset.tsv'
+        ),
+    }
+    DATASET_MD5 = {
+        'NaturalBenchDataset': 'dbe25b044bc35696426381e9ba4fe930',
+    }
+
+    def build_prompt(self, line):
+        SUFFIX_FOR_VQA = {
+            "yes_no": "Please answer Yes or No.",
+            "multiple_choice": "Please output the letter corresponding to the correct option."
+ } + if isinstance(line, int): + line = self.data.iloc[line] + + if self.meta_only: + tgt_path = toliststr(line['image_path']) + else: + tgt_path = self.dump_image(line) + + question = line['question'] + prompt = f'{question} {SUFFIX_FOR_VQA[line["type"]]}' + msgs = [] + if isinstance(tgt_path, list): + msgs.extend([dict(type='image', value=p) for p in tgt_path]) + else: + msgs = [dict(type='image', value=tgt_path)] + msgs.append(dict(type='text', value=prompt)) + + return msgs + + def evaluate(self, eval_file, **judge_kwargs): + from .utils.naturalbench import extract_answer, get_scores + + data = load(eval_file) + data = data.sort_values(by='index') + predictions = [str(x) for x in data['prediction']] + answers = [str(x) for x in data['answer']] + indexs = [str(x) for x in data['index']] + meta = self.data + types = [str(x) for x in meta['type']] + results = {} + assert len(predictions) == len(answers) == len(indexs) == len(types) == (1900 * 4) + number_answered_samples = len(predictions) // 4 + for i in range(number_answered_samples): + results[i] = { + "q0_i0": extract_answer(predictions[i * 4], types[i * 4]), + "q0_i1": extract_answer(predictions[i * 4 + 1], types[i * 4 + 1]), + "q1_i0": extract_answer(predictions[i * 4 + 2], types[i * 4 + 2]), + "q1_i1": extract_answer(predictions[i * 4 + 3], types[i * 4 + 3]) + } + + scores = get_scores(results) + print(scores) + score_file = 'NaturalBench_acc.csv' + df = pd.DataFrame(list(scores.items()), columns=['Metric', 'Score']) + dump(df, score_file) + + return scores + + +class WeMath(ImageBaseDataset): + TYPE = 'MCQ' + DATASET_URL = { + 'WeMath': 'https://opencompass.openxlab.space/utils/VLMEval/WeMath.tsv', + 'WeMath_COT': 'https://opencompass.openxlab.space/utils/VLMEval/WeMath.tsv', + } + DATASET_MD5 = {'WeMath': 'b5e969a075f01290a542411fb7766388', + 'WeMath_COT': 'b5e969a075f01290a542411fb7766388'} + + def build_prompt(self, line): + if isinstance(line, int): + line = self.data.iloc[line] + + if self.meta_only: + tgt_path = toliststr(line['image_path']) + else: + tgt_path = self.dump_image(line) + + question = line['question'] + options = { + cand: line[cand] + for cand in string.ascii_uppercase + if cand in line and not pd.isna(line[cand]) + } + options_prompt = 'Options:\n' + for key, item in options.items(): + options_prompt += f'{key}. 
{item}\n' + hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None + prompt = '' + if hint is not None: + prompt += f'Hint: {hint}\n' + prompt += f'Question: {question}\n' + if len(options): + prompt += options_prompt + + if 'COT' in self.dataset_name: + requirement = line['requirement'] + if requirement is not None: + prompt += f'\n{requirement}' + + msgs = [] + if isinstance(tgt_path, list): + msgs.extend([dict(type='image', value=p) for p in tgt_path]) + else: + msgs = [dict(type='image', value=tgt_path)] + msgs.append(dict(type='text', value=prompt)) + + return msgs + + def evaluate(self, eval_file, **judge_kwargs): + from .utils.wemath import wemath_evaluate_models, wemath_accuracy + from .utils.multiple_choice import mcq_vanilla_eval + + # model = judge_kwargs['model'] + model = judge_kwargs.get('model', 'exact_matching') + assert model in ['exact_matching', 'gpt-4-0125', 'gpt-4-turbo', 'gpt-4o-mini'], model + name_str_map = {'gpt-4-0125': 'gpt4', 'gpt-4-turbo': 'gpt4-turbo', 'gpt-4o-mini': 'gpt4o-mini'} + name_str = name_str_map[model] if model in name_str_map else model + + if model == 'exact_matching': + model = None + elif gpt_key_set(): + model = build_judge(**judge_kwargs) + if not model.working(): + warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation') + warnings.warn(DEBUG_MESSAGE) + model = None + else: + warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation') + model = None + + suffix = eval_file.split('.')[-1] + storage = eval_file.replace(f'.{suffix}', f'_{name_str}.xlsx') + nproc = judge_kwargs.pop('nproc', 4) + + if not osp.exists(storage) and model is not None: + data = load(eval_file) + result_file = eval_file.replace(f'.{suffix}', f'_{name_str}_result.pkl') + + data = load(eval_file) + data = data.sort_values(by='index') + data['prediction'] = [str(x) for x in data['prediction']] + # If not choice label, then use lower case + for k in data.keys(): + data[k.lower() if k not in list(string.ascii_uppercase) else k] = data.pop(k) + + meta = self.data + meta_q_map = {x: y for x, y in zip(meta['index'], meta['question'])} + data_map = {x: y for x, y in zip(data['index'], data['question'])} + for k in data_map: + assert k in meta_q_map, ( + f'eval_file should be the same as or a subset of dataset {self.dataset_name}' + ) + data = mcq_vanilla_eval(model, data, meta, nproc, result_file, self.dataset_name) + + if 'id' in data.columns: + # 更改列名 + data.rename(columns={'id': 'ID'}, inplace=True) + dump(data, storage) + if osp.exists(storage): + accuracy_scores = wemath_evaluate_models(storage) + four_dim_scores = wemath_accuracy(storage) + else: + accuracy_scores = wemath_evaluate_models(eval_file) + four_dim_scores = wemath_accuracy(eval_file) + combine_score = {**accuracy_scores, **four_dim_scores} + combine_score = pd.DataFrame(combine_score) + score_pth = storage.replace('.xlsx', '_score.csv') + dump(combine_score, score_pth) + return combine_score + +class VMCBenchDataset(ImageBaseDataset): + + TYPE = 'MCQ' + + DATASET_URL = { + 'VMCBench_DEV': 'https://huggingface.co/datasets/suyc21/VMCBench/resolve/main/data/tsv/VMCBench_DEV.tsv', + 'VMCBench_TEST': 'https://huggingface.co/datasets/suyc21/VMCBench/resolve/main/data/tsv/VMCBench_TEST.tsv' + } + + DATASET_MD5 = { + } + def build_prompt(self, line): + if isinstance(line, int): + line = self.data.iloc[line] + + if self.meta_only: + tgt_path = toliststr(line['image_path']) + else: + tgt_path = self.dump_image(line) + question = 
line['question'] + options = { + cand: line[cand] + for cand in string.ascii_uppercase + if cand in line and not pd.isna(line[cand]) + } + options_prompt = 'Options:\n' + for key, item in options.items(): + options_prompt += f'{key}. {item}\n' + prompt = '' + prompt += f'Question: {question}\n' + if len(options): + prompt += options_prompt + prompt += "Answer with the option's letter from the given choices directly. \n" + + msgs = [] + if isinstance(tgt_path, list): + msgs.extend([dict(type='image', value=p) for p in tgt_path]) + else: + msgs = [dict(type='image', value=tgt_path)] + msgs.append(dict(type='text', value=prompt)) + + return msgs + + def evaluate(self, eval_file, **judge_kwargs): + from .utils.vmcbench import get_mc_score, report_vmc_acc + suffix = eval_file.split('.')[-1] + data = load(eval_file) + data = data.sort_values(by='index') + data['prediction'] = [str(x) for x in data['prediction']] + data['hit'] = data.apply(get_mc_score, axis=1) + result_file = eval_file.replace(f'.{suffix}', f'_result.{suffix}') + dump(data, result_file) + acc = report_vmc_acc(data) + score_file = eval_file.replace(f'.{suffix}', '_acc.csv') + dump(acc, score_file) + + return acc diff --git a/vlmeval/dataset/image_mt.py b/vlmeval/dataset/image_mt.py new file mode 100644 index 0000000000000000000000000000000000000000..07658948a01c7e65d857794c92ef32e9c4b94f46 --- /dev/null +++ b/vlmeval/dataset/image_mt.py @@ -0,0 +1,128 @@ +from .image_base import ImageBaseDataset +from .utils.judge_util import build_judge +from ..smp import * +from ..utils import track_progress_rich + + +class ImageMTDataset(ImageBaseDataset): + + TYPE = 'MT' + + def build_prompt(self, line): + if isinstance(line, int): + line = self.data.iloc[line] + + if self.meta_only: + tgt_path = toliststr(line['image_path']) + else: + tgt_path = self.dump_image(line) + + questions = toliststr(line['question']) + if 'answer' in line: + answers = toliststr(line['answer']) + else: + answers = [''] * len(questions) + assert len(questions) == len(answers) + + dlgs, pics_number = [], 0 + for i in range(len(questions)): + q, a = questions[i], answers[i] + if '' in q: + content = [] + tag_number = q.count('') + images = tgt_path[pics_number: pics_number + tag_number] + pics_number += tag_number + q_split = q.split('') + for i in range(tag_number): + qsp, im = q_split[i], images[i] + if qsp != '': + content.append(dict(type='text', value=qsp)) + content.append(dict(type='image', value=im)) + if q_split[-1] != '': + content.append(dict(type='text', value=q_split[-1])) + else: + content = [dict(type='text', value=q)] + dlgs.append(dict(role='user', content=content)) + assert '' not in a, 'We currently do not support images in the answer. 
' + content = [dict(type='text', value=a)] + dlgs.append(dict(role='assistant', content=content)) + return dlgs + + +class MMDUDataset(ImageMTDataset): + + DATASET_URL = {'MMDU': 'https://opencompass.openxlab.space/utils/VLMEval/MMDU.tsv'} + DATASET_MD5 = {'MMDU': '848b635a88a078f49aebcc6e39792061'} + DIMS = [ + 'Creativity', 'Richness', 'Visual Perception', 'Logical Coherence', + 'Answer Accuracy', 'Image Relationship Understanding', 'Overall Score' + ] + + def calculat_metric(self, ans): + all = defaultdict(lambda: 0) + tot = defaultdict(lambda: 0) + valid = defaultdict(lambda: 0) + for k in ans: + res = ans[k]['res'] + assert isinstance(res, pd.DataFrame) + lt = len(res) + for i in range(lt): + line = res.iloc[i] + for k in self.DIMS: + tot[k] += 1 + if k in line and line[k] is not None: + try: + score = int(line[k]) + score = np.clip(score, 0, 10) + all[k] += score + valid[k] += 1 + except Exception as e: + print(f'Failed to parse the score: {str(e)}') + sp1 = {'set': 'all'} + sp1.update({k: all[k] / tot[k] * 10 for k in self.DIMS}) + sp2 = {'set': 'valid'} + sp2.update({k: all[k] / valid[k] * 10 for k in self.DIMS}) + + return pd.DataFrame([sp1, sp2]) + + def evaluate(self, eval_file, **judge_kwargs): + suffix = eval_file.split('.')[-1] + model = judge_kwargs['model'] + + tmp_file = eval_file.replace(f'.{suffix}', f'_{model}.pkl') + score_file = eval_file.replace(f'.{suffix}', f'_{model}_score.csv') + nproc = judge_kwargs.pop('nproc', 4) + + data = load(eval_file) + model = judge_kwargs.pop('model', 'gpt-4o') + judge_model = build_judge(model=model, **judge_kwargs) + + lt = len(data) + lines = [data.iloc[i] for i in range(lt)] + tups = [(judge_model, line) for line in lines] + indices = [line['index'] for line in lines] + + ans = {} + if osp.exists(tmp_file): + ans = load(tmp_file) + + tups = [x for x, i in zip(tups, indices) if i not in ans] + indices = [i for i in indices if i not in ans] + + from .utils.mmdu import mmdu_score + + if len(indices): + new_results = track_progress_rich( + mmdu_score, + tups, + nproc=nproc, + chunksize=nproc, + keys=indices, + save=tmp_file,) + ans = load(tmp_file) + for k, v in zip(indices, new_results): + assert k in ans + + metric = self.calculat_metric(ans) + dump(metric, score_file) + return metric diff --git a/vlmeval/dataset/image_shortqa.py b/vlmeval/dataset/image_shortqa.py new file mode 100644 index 0000000000000000000000000000000000000000..acbcb089e1dc7261b6500f2cdf6aeec596c6d94b --- /dev/null +++ b/vlmeval/dataset/image_shortqa.py @@ -0,0 +1,139 @@ +from vlmeval import * +from .image_base import ImageBaseDataset +from .utils import build_judge +from .utils.multiple_choice import report_acc, eval_vanilla, eval_circular_group +from .utils.shortqa import ShortQA_prompt +from ..utils import track_progress_rich + + +def ShortQA_auxeval(model, line): + def proc_str(s): + chs = set(s) + chs = [x for x in chs if x not in string.ascii_letters + ': '] + for ch in chs: + s = s.replace(ch, ' ') + return s + + def extraction(resp): + correct, reason = None, None + correct_st, correct_ed = '[Begin Correctness]', '[End Correctness]' + reason_st, reason_ed = '[Begin Reason]', '[End Reason]' + if correct_st in resp and correct_ed in resp: + correct = resp.split(correct_st)[1].split(correct_ed)[0].strip().lower() + if ('yes' in correct) ^ ('no' in correct): + correct = 1 if 'yes' in correct else 0 + if reason_st in resp and reason_ed in resp: + reason = resp.split(reason_st)[1].split(reason_ed)[0].strip() + return correct, reason + else: + return None, None + 
else: + return None, None + + prompt = ShortQA_prompt(line) + retry = 3 + for i in range(retry): + output = model.generate(prompt, temperature=0.5 * i) + ans = extraction(output) + # print(output, ans) + if ans[0] in [0, 1]: + return dict(hit=ans[0], log=ans[1]) + + return dict(hit=0, log='Fail to Judge') + + +def Comprehensive_auxeval(model, data): + def valid(record, key_name): + return key_name in record and (not pd.isna(record[key_name])) and record[key_name] != '' + + if isinstance(data, pd.DataFrame) and len(data) > 1: + # Should Adopt CircularEval + assert valid(data.iloc[0], 'A') + data['GT'] = data['answer'] + return eval_circular_group(model, data) + else: + item = data.iloc[0] if isinstance(data, pd.DataFrame) else data + if valid(item, 'A') and len(item['answer']) == 1: + item['GT'] = item['answer'] + return eval_vanilla(model, item) + else: + return ShortQA_auxeval(model, item) + + +class ImageShortQADataset(ImageBaseDataset): + TYPE = 'Short' + + DATASET_URL = { + 'LiveMMBench_Infographic': '', + 'LiveMMBench_Perception': '', + 'LiveMMBench_Reasoning': '', + 'LiveMMBench_Reasoning_circular': '', + } + + DATASET_MD5 = {} + + def build_prompt(self, line): + msgs = super().build_prompt(line) + assert msgs[-1]['type'] == 'text' + msgs[-1]['value'] += '\nPlease directly provide a short answer to the question. ' + return msgs + + # It returns a DataFrame + def evaluate(self, eval_file, **judge_kwargs): + data = load(eval_file) + dataset = self.dataset_name + assert 'answer' in data and 'prediction' in data + data['prediction'] = [str(x) for x in data['prediction']] + data['answer'] = [str(x) for x in data['answer']] + + storage = eval_file.replace('.xlsx', '_judge.xlsx') + tmp_file = eval_file.replace('.xlsx', '_tmp.pkl') + nproc = judge_kwargs.pop('nproc', 4) + + if not osp.exists(storage): + ans_map = {} if not osp.exists(tmp_file) else load(tmp_file) + + model = judge_kwargs.get('model', 'gpt-4o-mini') + if model == 'exact_matching': + model = None + elif gpt_key_set(): + model = build_judge(model=model, **judge_kwargs) + if not model.working(): + warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation') + warnings.warn(DEBUG_MESSAGE) + model = None + else: + model = None + warnings.warn('OPENAI_API_KEY is not working properly, will use exact matching for evaluation') + + if model is not None: + if 'g_index' not in data: + lines = [data.iloc[i] for i in range(len(data))] + indices = [x['index'] for x in lines if x['index'] not in ans_map] + lines = [x for x in lines if x['index'] not in ans_map] + tups = [(model, line) for line in lines] + else: + main_data = data[[x == y for x, y in zip(data['index'], data['g_index'])]] + lines = [data[data['g_index'] == x] for x in main_data['index']] + indices = [x.iloc[0]['g_index'] for x in lines if x.iloc[0]['g_index'] not in ans_map] + lines = [x for x in lines if x.iloc[0]['g_index'] not in ans_map] + tups = [(model, x) for x in lines] + data = main_data + + if len(lines): + res = track_progress_rich( + Comprehensive_auxeval, tups, nproc=nproc, chunksize=nproc, keys=indices, save=tmp_file) + for k, v in zip(indices, res): + ans_map[k] = v + + judge_results = [ans_map[x] for x in data['index']] + data['hit'] = [x['hit'] for x in judge_results] + data['log'] = [x['log'] for x in judge_results] + dump(data, storage) + + data = load(storage) + acc = report_acc(data) + + score_file = eval_file.replace(f'.xlsx', '_acc.csv') + dump(acc, score_file) + return acc diff --git a/vlmeval/dataset/image_vqa.py 
b/vlmeval/dataset/image_vqa.py new file mode 100644 index 0000000000000000000000000000000000000000..f3de48f0de4f09e9849f93d0af13a47c670e46b5 --- /dev/null +++ b/vlmeval/dataset/image_vqa.py @@ -0,0 +1,1536 @@ +import os +import re +import tempfile +from functools import partial + +import pandas as pd + +from .image_base import ImageBaseDataset +from .utils import build_judge, DEBUG_MESSAGE +from ..smp import * +from ..utils import track_progress_rich + + +class ImageVQADataset(ImageBaseDataset): + TYPE = 'VQA' + + DATASET_URL = { + 'OCRVQA_TEST': 'https://opencompass.openxlab.space/utils/VLMEval/OCRVQA_TEST.tsv', + 'OCRVQA_TESTCORE': 'https://opencompass.openxlab.space/utils/VLMEval/OCRVQA_TESTCORE.tsv', + 'TextVQA_VAL': 'https://opencompass.openxlab.space/utils/VLMEval/TextVQA_VAL.tsv', + 'DocVQA_VAL': 'https://opencompass.openxlab.space/utils/VLMEval/DocVQA_VAL.tsv', + 'DocVQA_TEST': 'https://opencompass.openxlab.space/utils/VLMEval/DocVQA_TEST.tsv', + 'InfoVQA_VAL': 'https://opencompass.openxlab.space/utils/VLMEval/InfoVQA_VAL.tsv', + 'InfoVQA_TEST': 'https://opencompass.openxlab.space/utils/VLMEval/InfoVQA_TEST.tsv', + 'ChartQA_TEST': 'https://opencompass.openxlab.space/utils/VLMEval/ChartQA_TEST.tsv', + 'GQA_TestDev_Balanced': 'https://opencompass.openxlab.space/utils/VLMEval/GQA_TestDev_Balanced.tsv', + } + + DATASET_MD5 = { + 'OCRVQA_TEST': 'ca46a6d74b403e9d6c0b670f6fc00db9', + 'OCRVQA_TESTCORE': 'c5239fe77db8bdc1f2ad8e55e0d1fe97', + 'TextVQA_VAL': 'b233b31f551bbf4056f2f955da3a92cd', + 'DocVQA_VAL': 'd5ee77e1926ff10690d469c56b73eabf', + 'DocVQA_TEST': '6a2f28cac26ef2d3447374e8c6f6c8e9', + 'InfoVQA_VAL': '2342e9c225222f0ef4dec545ebb126fe', + 'InfoVQA_TEST': 'df535bf51b88dc9718252c34131a6227', + 'ChartQA_TEST': 'c902e0aa9be5582a7aad6dcf52734b42', + 'GQA_TestDev_Balanced': '99b62f22e224d9b2f32dcbe41359d1c9', + } + + def build_prompt(self, line): + msgs = super().build_prompt(line) + assert msgs[-1]['type'] == 'text' + msgs[-1]['value'] += '\nAnswer the question using a single word or phrase.' 
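The `evaluate()` method that follows dispatches a different metric per dataset (VQA score for TextVQA, relaxed accuracy for ChartQA, plain accuracy for OCRVQA/GQA, ANLS for DocVQA/InfoVQA). As an illustration of what the ANLS branch computes, here is a self-contained sketch; the actual helpers live in `.utils.vqa_eval` and may differ in detail:

```python
# Illustrative ANLS sketch -- not the shipped process_line/hit_calculate helpers.

def _edit_distance(a: str, b: str) -> int:
    # Classic dynamic-programming Levenshtein distance.
    dp = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        prev, dp[0] = dp[0], i
        for j, cb in enumerate(b, 1):
            prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (ca != cb))
    return dp[-1]


def anls(prediction: str, answers: list, threshold: float = 0.5) -> float:
    # Normalized Levenshtein similarity against the best-matching reference;
    # similarities below the threshold are clipped to 0.
    best = 0.0
    for ans in answers:
        p, a = prediction.strip().lower(), ans.strip().lower()
        if not p and not a:
            sim = 1.0
        else:
            sim = 1 - _edit_distance(p, a) / max(len(p), len(a))
        best = max(best, sim)
    return best if best >= threshold else 0.0


print(anls('the 2019 report', ['The 2019 Report']))  # 1.0
```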
+ return msgs + + # It returns a DataFrame + def evaluate(self, eval_file, **judge_kwargs): + from .utils.vqa_eval import hit_calculate, process_line + + data = load(eval_file) + dataset = self.dataset_name + assert 'answer' in data and 'prediction' in data + data['prediction'] = [str(x) for x in data['prediction']] + data['answer'] = [str(x) for x in data['answer']] + lt = len(data) + pool = mp.Pool(16) + lines = [data.iloc[i] for i in range(lt)] + if listinstr(['TextVQA'], dataset): + res = pool.map(partial(process_line, method='vqa_score'), lines) + elif listinstr(['ChartQA'], dataset): + res = pool.map(partial(process_line, method='relaxed_accuracy'), lines) + elif listinstr(['OCRVQA', 'GQA'], dataset): + res = pool.map(partial(process_line, method='accuracy'), lines) + elif listinstr(['DocVQA', 'InfoVQA'], dataset): + res = pool.map(partial(process_line, method='anls'), lines) + else: # default using vqa_score to calculate score + res = pool.map(process_line, lines) + hit = hit_calculate(res, dataset) + ret = dict() + if 'split' in data: + splits = set(data['split']) + for sp in splits: + sub = [r for l, r in zip(lines, res) if l['split'] == sp] + # [np.mean(x['match']) >= full_score_weight for x in sub] + hit = hit_calculate(sub, dataset) + ret[sp] = np.mean(hit) * 100 + sub = [r for l, r in zip(lines, res)] + hit = hit_calculate(sub, dataset) + ret['Overall'] = np.mean(hit) * 100 + else: + ret['Overall'] = np.mean(hit) * 100 + if 'category' in data: + cates = list(set(data['category'])) + cates.sort() + for c in cates: + sub = [r for l, r in zip(lines, res) if l['category'] == c] + # [np.mean(x['match']) >= full_score_weight for x in sub] + hit = hit_calculate(sub, dataset) + ret[c] = np.mean(hit) * 100 + ret = d2df(ret) + ret.round(2) + + suffix = eval_file.split('.')[-1] + result_file = eval_file.replace(f'.{suffix}', '_acc.csv') + dump(ret, result_file) + return ret + + +class VizWiz(ImageBaseDataset): + TYPE = 'VQA' + DATASET_URL = { + 'VizWiz': 'https://opencompass.openxlab.space/utils/VLMEval/VizWiz.tsv' + } + DATASET_MD5 = { + 'VizWiz': 'fa4ac4164467563ed2fac6eac6631bd0' + } + + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + from .utils.vqa_eval import hit_calculate, process_line + + suffix = eval_file.split('.')[-1] + result_file = eval_file.replace(f'.{suffix}', '_acc.csv') + + if not osp.exists(result_file): + data = load(eval_file) + assert 'answers' in data and 'prediction' in data + data['prediction'] = [str(x) for x in data['prediction']] + data['answer'] = [str(x) for x in data['answers']] + + lt = len(data) + pool = mp.Pool(16) + lines = [data.iloc[i] for i in range(lt)] + res = pool.map(process_line, lines) + + hit = hit_calculate(res, 'VizWiz') + ret = dict() + + ret['Overall'] = np.mean(hit) * 100 + ret = d2df(ret) + ret.round(2) + + dump(ret, result_file) + + retz = pd.read_csv(result_file) + return retz + + +class OCRBench(ImageBaseDataset): + TYPE = 'VQA' + DATASET_URL = { + 'OCRBench': 'https://opencompass.openxlab.space/utils/VLMEval/OCRBench.tsv', + # For internal test only + 'OCRBench_MINI': 'https://opencompass.openxlab.space/utils/TEST/OCRBench_MINI.tsv' + } + DATASET_MD5 = {'OCRBench': 'e953d98a987cc6e26ef717b61260b778'} + + # It returns a dictionary + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + OCRBench_score = { + 'Regular Text Recognition': 0, + 'Irregular Text Recognition': 0, + 'Artistic Text Recognition': 0, + 'Handwriting Recognition': 0, + 'Digit String Recognition': 0, + 'Non-Semantic Text Recognition': 0, + 
'Scene Text-centric VQA': 0, + 'Doc-oriented VQA': 0, + 'Key Information Extraction': 0, + 'Handwritten Mathematical Expression Recognition': 0, + } + + data = load(eval_file) + lt = len(data) + lines = [data.iloc[i] for i in range(lt)] + for i in tqdm(range(len(lines))): + line = lines[i] + predict = str(line['prediction']) + answers = eval(line['answer']) + category = line['category'] + if category == 'Handwritten Mathematical Expression Recognition': + for j in range(len(answers)): + answer = answers[j].strip().replace('\n', ' ').replace(' ', '') + predict = predict.strip().replace('\n', ' ').replace(' ', '') + if answer in predict: + OCRBench_score[category] += 1 + break + else: + for j in range(len(answers)): + answer = answers[j].lower().strip().replace('\n', ' ') + predict = predict.lower().strip().replace('\n', ' ') + if answer in predict: + OCRBench_score[category] += 1 + break + + final_score_dict = {} + final_score_dict['Text Recognition'] = \ + (OCRBench_score['Regular Text Recognition'] + OCRBench_score['Irregular Text Recognition'] + + OCRBench_score['Artistic Text Recognition'] + OCRBench_score['Handwriting Recognition'] + + OCRBench_score['Digit String Recognition'] + OCRBench_score['Non-Semantic Text Recognition']) + final_score_dict['Scene Text-centric VQA'] = OCRBench_score['Scene Text-centric VQA'] + final_score_dict['Doc-oriented VQA'] = OCRBench_score['Doc-oriented VQA'] + final_score_dict['Key Information Extraction'] = OCRBench_score['Key Information Extraction'] + final_score_dict['Handwritten Mathematical Expression Recognition'] = \ + (OCRBench_score['Handwritten Mathematical Expression Recognition']) + final_score_dict['Final Score'] = \ + (final_score_dict['Text Recognition'] + final_score_dict['Scene Text-centric VQA'] + + final_score_dict['Doc-oriented VQA'] + final_score_dict['Key Information Extraction'] + + final_score_dict['Handwritten Mathematical Expression Recognition']) + final_score_dict['Final Score Norm'] = (float(final_score_dict['Final Score']) / 10) + score_pth = eval_file.replace('.xlsx', '_score.json') + dump(final_score_dict, score_pth) + return final_score_dict + + +class MathVista(ImageBaseDataset): + TYPE = 'VQA' + DATASET_URL = { + 'MathVista_MINI': 'https://opencompass.openxlab.space/utils/VLMEval/MathVista_MINI.tsv' + } + DATASET_MD5 = {'MathVista_MINI': 'f199b98e178e5a2a20e7048f5dcb0464'} + + # It returns a DataFrame + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + from .utils.mathvista import MathVista_auxeval, MathVista_acc + + model = judge_kwargs['model'] + suffix = eval_file.split('.')[-1] + storage = eval_file.replace(f'.{suffix}', f'_{model}.xlsx') + tmp_file = eval_file.replace(f'.{suffix}', f'_{model}.pkl') + nproc = judge_kwargs.pop('nproc', 4) + + if not osp.exists(storage): + data = load(eval_file) + model = build_judge(max_tokens=128, **judge_kwargs) + assert model.working(), ('MathVista evaluation requires a working OPENAI API\n' + DEBUG_MESSAGE) + lt = len(data) + lines = [data.iloc[i] for i in range(lt)] + tups = [(model, line) for line in lines] + indices = [line['index'] for line in lines] + + ans = {} + if osp.exists(tmp_file): + ans = load(tmp_file) + tups = [x for x, i in zip(tups, indices) if i not in ans] + indices = [i for i in indices if i not in ans] + + if len(indices): + new_results = track_progress_rich( + MathVista_auxeval, + tups, + nproc=nproc, + chunksize=nproc, + keys=indices, + save=tmp_file, + ) + ans = load(tmp_file) + for k, v in zip(indices, new_results): + assert k in ans + 
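For reference, the per-item hit test in `OCRBench.evaluate` above boils down to a normalized substring check; a minimal sketch is given below (the handwritten-math split additionally strips all whitespace before comparing, and hits are accumulated per category to produce 'Final Score' and 'Final Score Norm'):

```python
# Minimal sketch of the OCRBench hit test (non-math categories).

def ocrbench_hit(prediction: str, answers: list) -> bool:
    pred = prediction.lower().strip().replace('\n', ' ')
    return any(ans.lower().strip().replace('\n', ' ') in pred for ans in answers)


print(ocrbench_hit('The sign reads "OPEN 24 HOURS".', ['open 24 hours']))  # True
```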
assert ans[k]['log'] == v['log'] and ans[k]['res'] == v['res'] + + data['res'] = [ans[idx]['res'] for idx in data['index']] + data['log'] = [ans[idx]['log'] for idx in data['index']] + dump(data, storage) + + score = MathVista_acc(storage) + score_pth = storage.replace('.xlsx', '_score.csv') + dump(score, score_pth) + return score + + +class MathVerse(ImageBaseDataset): + TYPE = 'VQA' + DATASET_URL = { + 'MathVerse_MINI': 'http://opencompass.openxlab.space/utils/benchmarks/MathVerse/MathVerse_MINIV.tsv', # noqa + 'MathVerse_MINI_Vision_Only': 'http://opencompass.openxlab.space/utils/benchmarks/MathVerse/MathVerse_MINIVOnly.tsv', # noqa + 'MathVerse_MINI_Vision_Only_cot': 'http://opencompass.openxlab.space/utils/benchmarks/MathVerse/MathVerse_MINIVOnly.tsv', # noqa + 'MathVerse_MINI_Vision_Dominant': 'http://opencompass.openxlab.space/utils/benchmarks/MathVerse/MathVerse_MINIVDom.tsv', # noqa + 'MathVerse_MINI_Vision_Intensive': 'http://opencompass.openxlab.space/utils/benchmarks/MathVerse/MathVerse_MINIVInt.tsv', # noqa + 'MathVerse_MINI_Text_Lite': 'http://opencompass.openxlab.space/utils/benchmarks/MathVerse/MathVerse_MINITLite.tsv', # noqa + 'MathVerse_MINI_Text_Dominant': 'http://opencompass.openxlab.space/utils/benchmarks/MathVerse/MathVerse_MINITDom.tsv', # noqa + } + DATASET_MD5 = { + 'MathVerse_MINI': '5017caca32b7fa110c350a1bea861b65', + 'MathVerse_MINI_Vision_Only': '68a11d4680014ac881fa37adeadea3a4', + 'MathVerse_MINI_Vision_Only_cot': '68a11d4680014ac881fa37adeadea3a4', + 'MathVerse_MINI_Vision_Dominant': 'b8fb63852d261ab2aaefba29cc2414d3', + 'MathVerse_MINI_Vision_Intensive': '01cbd35be202bb0c4873a4186a63bc19', + 'MathVerse_MINI_Text_Lite': '19e4b13bdd30b89a03b2e358bcfefa04', + 'MathVerse_MINI_Text_Dominant': '4f5cd2fa6630ea00bb11d6fde1f6fe6a', + } + + # Given one data record, return the built prompt (a multi-modal message), can override + def build_prompt(self, line): + if isinstance(line, int): + line = self.data.iloc[line] + + if self.meta_only: + tgt_path = toliststr(line['image_path']) + else: + tgt_path = self.dump_image(line) + if 'cot' in self.dataset_name: + question = line['query_cot'] + else: + question = line['question'] + + msgs = [] + if isinstance(tgt_path, list): + msgs.extend([dict(type='image', value=p) for p in tgt_path]) + else: + msgs = [dict(type='image', value=tgt_path)] + msgs.append(dict(type='text', value=question)) + return msgs + + # It returns a DataFrame + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + from .utils.mathverse import MathVerse_auxeval_extract, MathVerse_auxeval_score, MathVerse_acc + + model = judge_kwargs['model'] + suffix = eval_file.split('.')[-1] + storage_extract = eval_file.replace(f'.{suffix}', f'_{model}_extract.xlsx') + tmp_file_extract = eval_file.replace(f'.{suffix}', f'_{model}_extract.pkl') + storage_score = eval_file.replace(f'.{suffix}', f'_{model}_score.xlsx') + tmp_file_score = eval_file.replace(f'.{suffix}', f'_{model}_score.pkl') + nproc = judge_kwargs.pop('nproc', 4) + # stage1: extract the answer + if not osp.exists(storage_extract): + data = load(eval_file) + model = build_judge(max_tokens=128, **judge_kwargs) + assert model.working(), ('MathVerse evaluation requires a working OPENAI API\n' + DEBUG_MESSAGE) + lt = len(data) + lines = [data.iloc[i] for i in range(lt)] + tups = [(model, line) for line in lines] + indices = [line['index'] for line in lines] + + ans = {} + if osp.exists(tmp_file_extract): + ans = load(tmp_file_extract) + tups = [x for x, i in zip(tups, indices) if i not in ans] + 
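The two-stage extract/score flow here, like the other judge-based `evaluate()` methods in this file, is resumable: previously judged results are loaded from a temporary `.pkl` cache and only the missing indices are sent to the judge again. A minimal sketch of that filtering step, using plain `pickle` instead of the repo's `load()` helper:

```python
import os
import pickle


def filter_pending(tups, indices, cache_path):
    # Load cached judgements (if any) and keep only the items not judged yet.
    ans = {}
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as f:
            ans = pickle.load(f)
    kept = [(t, i) for t, i in zip(tups, indices) if i not in ans]
    return ans, [t for t, _ in kept], [i for _, i in kept]
```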
indices = [i for i in indices if i not in ans] + + if len(indices): + new_results = track_progress_rich( + MathVerse_auxeval_extract, + tups, + nproc=nproc, + chunksize=nproc, + keys=indices, + save=tmp_file_extract, + ) + ans = load(tmp_file_extract) + for k, v in zip(indices, new_results): + assert k in ans + assert ans[k]['log_extract'] == v['log_extract'] and ans[k]['extract'] == v['extract'] + + data['extract'] = [ans[idx]['extract'] for idx in data['index']] + data['log_extract'] = [ans[idx]['log_extract'] for idx in data['index']] + dump(data, storage_extract) + + # stage2: score the answer + if not osp.exists(storage_score): + data = load(storage_extract) + model = build_judge(max_tokens=128, **judge_kwargs) + assert model.working(), ('MathVerse evaluation requires a working OPENAI API\n' + DEBUG_MESSAGE) + lt = len(data) + lines = [data.iloc[i] for i in range(lt)] + tups = [(model, line) for line in lines] + indices = [line['index'] for line in lines] + + ans = {} + if osp.exists(tmp_file_score): + ans = load(tmp_file_score) + tups = [x for x, i in zip(tups, indices) if i not in ans] + indices = [i for i in indices if i not in ans] + + if len(indices): + new_results = track_progress_rich( + MathVerse_auxeval_score, + tups, + nproc=nproc, + chunksize=nproc, + keys=indices, + save=tmp_file_score, + ) + ans = load(tmp_file_score) + for k, v in zip(indices, new_results): + assert k in ans + assert ans[k]['log_score'] == v['log_score'] and ans[k]['score'] == v['score'] + + data['score'] = [ans[idx]['score'] for idx in data['index']] + data['log_score'] = [ans[idx]['log_score'] for idx in data['index']] + dump(data, storage_score) + + score = MathVerse_acc(storage_score) + score_pth = storage_score.replace('.xlsx', '.csv') + dump(score, score_pth) + return score + + +class MathVision(ImageBaseDataset): + TYPE = 'VQA' + DATASET_URL = { + 'MathVision': 'https://opencompass.openxlab.space/utils/VLMEval/MathVision.tsv', + 'MathVision_MINI': 'https://opencompass.openxlab.space/utils/VLMEval/MathVision_MINI.tsv' + } + DATASET_MD5 = { + 'MathVision': '93f6de14f7916e598aa1b7165589831e', + 'MathVision_MINI': '060fe4fa5d868987ce179307bd5f8a33' + } + + # It returns a DataFrame + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + from .utils.mathv import MATH_V_auxeval, MATH_V_acc + + if 'model' in judge_kwargs: + model = judge_kwargs['model'] + else: + model = os.path.basename(os.environ.get('LOCAL_LLM')) + suffix = eval_file.split('.')[-1] + storage = eval_file.replace(f'.{suffix}', f'_{model}.xlsx') + tmp_file = eval_file.replace(f'.{suffix}', f'_{model}.pkl') + nproc = judge_kwargs.pop('nproc', 4) + + if not osp.exists(storage): + data = load(eval_file) + model = build_judge(max_tokens=128, **judge_kwargs) + assert model.working(), ('MATH-Vision evaluation requires a working OPENAI API\n' + DEBUG_MESSAGE) + lt = len(data) + lines = [data.iloc[i] for i in range(lt)] + tups = [(model, line) for line in lines] + indices = [line['index'] for line in lines] + + ans = {} + if osp.exists(tmp_file): + ans = load(tmp_file) + tups = [x for x, i in zip(tups, indices) if i not in ans] + indices = [i for i in indices if i not in ans] + + if len(indices): + new_results = track_progress_rich( + MATH_V_auxeval, + tups, + nproc=nproc, + chunksize=nproc, + keys=indices, + save=tmp_file, + ) + ans = load(tmp_file) + for k, v in zip(indices, new_results): + assert k in ans + assert ans[k]['log'] == v['log'] and ans[k]['res'] == v['res'] + + data['res'] = [ans[idx]['res'] for idx in data['index']] + 
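The artifact paths used throughout these evaluators are all derived from `eval_file` by suffix substitution. A small worked example (the file and model names below are hypothetical):

```python
# Hypothetical names, shown only to illustrate the naming convention.
eval_file = 'outputs/MyVLM/MyVLM_MathVision_MINI.xlsx'
model = 'gpt-4o-mini'
suffix = eval_file.split('.')[-1]                              # 'xlsx'
storage = eval_file.replace(f'.{suffix}', f'_{model}.xlsx')    # judged records
tmp_file = eval_file.replace(f'.{suffix}', f'_{model}.pkl')    # resumable judge cache
score_pth = storage.replace('.xlsx', '_score.csv')             # final score table
print(storage, tmp_file, score_pth)
```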
data['log'] = [ans[idx]['log'] for idx in data['index']] + dump(data, storage) + + score = MATH_V_acc(storage) + score_pth = storage.replace('.xlsx', '_score.csv') + dump(score, score_pth) + return score + + +class OlympiadBench(ImageBaseDataset): + TYPE = 'VQA_ex_prompt' + DATASET_URL = { + 'OlympiadBench': 'https://opencompass.openxlab.space/utils/VLMEval/OlympiadBench.tsv', + 'OlympiadBench_EN': 'https://opencompass.openxlab.space/utils/VLMEval/OlympiadBench_EN.tsv', + 'OlympiadBench_CN': 'https://opencompass.openxlab.space/utils/VLMEval/OlympiadBench_CN.tsv' + } + DATASET_MD5 = { + 'OlympiadBench': '9735ae0f0299eae1e7d07f5a7feab914', + 'OlympiadBench_EN': '5c68e100d394351fc7049f29d4d4efed', + 'OlympiadBench_CN': 'ea01b16788955702c79650c701e5b623' + } + + def dump_image(self, line): + os.makedirs(self.img_root, exist_ok=True) + + tgt_path_z = [] + if isinstance(line['image'], list): + for i in range(len(line['image'])): + tgt_path = osp.join(self.img_root, f"{line['index']}--{i + 1}.jpg") + if not read_ok(tgt_path): + decode_base64_to_image_file(line['image'][i], tgt_path) + tgt_path_z.append(tgt_path) + else: + tgt_path = osp.join(self.img_root, f"{line['index']}.jpg") + if not read_ok(tgt_path): + decode_base64_to_image_file(line['image'], tgt_path) + tgt_path_z.append(tgt_path) + return tgt_path_z + + def build_prompt(self, line): + + from .utils.olympiadbench import get_answer_type_text, make_input + + self.is_chinese = 'zh' in line['source'] + self.is_math = 'maths' in line['source'] + self.is_theorem_proving = 'TP' in line['source'] + + if self.is_chinese: + subject_content = '数学' if self.is_math else '物理' + if self.is_theorem_proving: + prompt = ( + f"以下是中国{subject_content}竞赛中的证明题。请根据题目的要求,运用逻辑推理及常用定理证明题目中的命题。" + "证明过程中使用的变量和公式请使用LaTeX格式表示。" + ) + else: + answer_type_text = get_answer_type_text(line['answer_type'], is_chinese=True, + multiple_answer=line['is_multiple_answer']) + if line['is_multiple_answer']: + multiple_answer_text = '\\boxed{用英文逗号连接的多个答案}' + else: + multiple_answer_text = '\\boxed{答案}' + unit_text = '' + if line['unit']: + multiple_answer_text += '(单位)' + unit_text = ',注意答案的单位不要放在\\boxed{}中' + prompt = ( + f'以下是中国{subject_content}竞赛中的解答题{answer_type_text}。请根据题目的要求和所提供的信息计算得出答案。' + f'解答过程和结果中使用的变量和公式请使用LaTeX格式表示。请在最后以“所以最终答案是{multiple_answer_text}。”' + f'显式给出结果{unit_text}。' + ) + else: + subject_content = 'Math' if self.is_math else 'Physics' + if self.is_theorem_proving: + prompt = ( + f'The following is a theorem proving problem from an International {subject_content} competition. ' + 'Please use logical reasoning and common theorems to prove the proposition in the problem ' + 'according to the given requirements. ' + 'Please use LaTeX format to represent the variables and formulas used in the proof.' + ) + else: + if line['is_multiple_answer']: + multiple_answer_text = '\\boxed{multiple answers connected with commas}' + else: + multiple_answer_text = '\\boxed{answer}' + unit_text = '' + if line['unit']: + multiple_answer_text += '(unit)' + unit_text = ', note that the unit of the answer should not be included in \\boxed{}' + answer_type_text = get_answer_type_text(line['answer_type'], is_chinese=False, + multiple_answer=line['is_multiple_answer']) + prompt = ( + f'The following is an open-ended problem from an International {subject_content} competition. ' + f'{answer_type_text}Please calculate the answer according to the given requirements and ' + 'the information provided. 
Please use LaTeX format to represent the variables and formulas ' + 'used in the solution process and results. Please end your solution with "So the final answer ' + f'is {multiple_answer_text}." and give the result explicitly{unit_text}.' + ) + + if self.is_math: + input = make_input(prompt, line['question']) + else: + if 'context' in line.keys() and str(line['context']) != 'nan': # cannot be null + input = make_input(prompt, line['context'] + '\n' + line['question']) + else: + input = make_input(prompt, line['question']) + + ret = [dict(type='text', value=input)] + tgt_path = self.dump_image(line) + + ret.extend([dict(type='image', value=s) for s in tgt_path]) + + return ret + + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + from .utils.olympiadbench import MathJudger, extract_answer + judger = MathJudger() + + suffix = eval_file.split('.')[-1] + name_str1 = 'judge' + name_str2 = 'score' + result_file = eval_file.replace(f'.{suffix}', f'_{name_str1}_result.xlsx') + score_file = eval_file.replace(f'.{suffix}', f'_{name_str2}_result.csv') + + if not osp.exists(result_file): + data = load(eval_file) + scorez = [] + + for i in tqdm(data.iterrows()): + line = i[1] + model_answer = line['prediction'] + is_chinese = 'zh' in line['source'] + model_answer = extract_answer(is_chinese, model_answer, is_deepseek=False) + answer_type = line['answer_type'] + + final_answer = line['final_answer'][2:-2] + + if str(answer_type) != 'nan' and 'Tuple' in answer_type: + judge_result = judger.judge(model_answer, final_answer) + else: + if str(line['error']) != 'nan': + if ',' in line['error']: + precisions = line['error'].split(',') + precisions = [float(p) if p else 1e-8 for p in precisions] + judge_result = judger.judge(model_answer, final_answer, precisions) + else: + precision = float(line['error']) + judge_result = judger.judge(model_answer, final_answer, precision) + else: + judge_result = judger.judge(model_answer, final_answer) + scorez.append(judge_result) + + data['score'] = scorez + dump(data, result_file) + + judge_file = load(result_file) + + if not osp.exists(score_file): + name_list = ['OE_MM_maths_en_COMP', 'OE_MM_maths_zh_CEE', 'OE_MM_maths_zh_COMP', 'OE_MM_physics_en_COMP', + 'OE_MM_physics_zh_CEE','OE_TO_maths_en_COMP', 'OE_TO_maths_zh_CEE', 'OE_TO_maths_zh_COMP', + 'OE_TO_physics_en_COMP', 'OE_TO_physics_zh_CEE'] + + sample_list = [[] for _ in range(len(name_list))] + for i in judge_file.iterrows(): + line = i[1] + for j in range(len(name_list)): + if line['source'] == name_list[j]: + sample_list[j].append(line['score']) + + acc_dict = {} + correct_list = [] + + # fine-grained + for i in range(len(name_list)): + correct_num = 0 + for j in sample_list[i]: + if j: + correct_num += 1 + correct_list.append(correct_num) + acc = 100 * correct_num / len(sample_list[i]) + acc_dict[name_list[i]] = [acc] + + # 4 grained + labela = ['zh', 'en'] + labelb = ['maths', 'physics'] + + grain_list = [[x,y] for x in labela for y in labelb] + for j in grain_list: + dict_name = j[0] + "_" + j[1] + correct_num = 0 + full_num = 0 + for i in range(len(name_list)): + if all(k in name_list[i] for k in j): + correct_num += correct_list[i] + full_num += len(sample_list[i]) + acc = 100 * correct_num / full_num + acc_dict[dict_name] = [acc] + + # 2 grained + grain_list = ['maths', 'physics'] + for j in grain_list: + dict_name = j + correct_num = 0 + full_num = 0 + for i in range(len(name_list)): + if j in name_list[i]: + correct_num += correct_list[i] + full_num += len(sample_list[i]) + acc = 100 * 
correct_num / full_num + acc_dict[dict_name] = [acc] + + # AVG + correct_num = sum(correct_list) + acc = 100 * correct_num / len(judge_file) + acc_dict['AVG'] = [acc] + + acc_pd = pd.DataFrame(acc_dict) + acc_pd.to_csv(score_file, index=False, encoding='gbk') + + accdz = pd.read_csv(score_file) + return accdz + + +class LogicVista(ImageBaseDataset): + TYPE = 'VQA' + DATASET_URL = { + 'LogicVista': 'https://opencompass.openxlab.space/utils/VLMEval/LogicVista.tsv' + } + DATASET_MD5 = {'LogicVista': '41c5d33adf33765c399e0e6ae588c061'} + + def evaluate(self, eval_file, **judge_kwargs): + from .utils.logicvista import LogicVista_auxeval, evaluate_logicvista + + # model = judge_kwargs['model'] + model = judge_kwargs.get('model', 'exact_matching') + assert model in ['exact_matching', 'gpt-4-0125', 'gpt-4-turbo', 'gpt-4o-mini'], model + name_str_map = {'gpt-4-0125': 'gpt4', 'gpt-4-turbo': 'gpt4-turbo', 'gpt-4o-mini': 'gpt4o-mini'} + name_str = name_str_map[model] if model in name_str_map else model + + if model == 'exact_matching': + model = None + elif gpt_key_set(): + model = build_judge(**judge_kwargs) + if not model.working(): + warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation') + warnings.warn(DEBUG_MESSAGE) + model = None + else: + warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation') + model = None + + suffix = eval_file.split('.')[-1] + storage = eval_file.replace(f'.{suffix}', f'_{name_str}.xlsx') + tmp_file = eval_file.replace(f'.{suffix}', f'_{name_str}.pkl') + nproc = judge_kwargs.pop('nproc', 4) + + if not osp.exists(storage) and model is not None: + data = load(eval_file) + model = build_judge(max_tokens=128, **judge_kwargs) + assert model.working(), ('LogicVista evaluation requires a working OPENAI API\n' + DEBUG_MESSAGE) + lt = len(data) + lines = [data.iloc[i] for i in range(lt)] + tups = [(model, line) for line in lines] + indices = [line['index'] for line in lines] + + ans = {} + if osp.exists(tmp_file): + ans = load(tmp_file) + tups = [x for x, i in zip(tups, indices) if i not in ans] + indices = [i for i in indices if i not in ans] + + if len(indices): + new_results = track_progress_rich( + LogicVista_auxeval, + tups, + nproc=nproc, + chunksize=nproc, + keys=indices, + save=tmp_file, + ) + ans = load(tmp_file) + for k, v in zip(indices, new_results): + assert k in ans + assert ans[k]['log'] == v['log'] and ans[k]['res'] == v['res'] and ans[k]['hit'] == v['hit'] + + data['res'] = [ans[idx]['res'] for idx in data['index']] + data['log'] = [ans[idx]['log'] for idx in data['index']] + data['hit'] = [ans[idx]['hit'] for idx in data['index']] + + dump(data, storage) + if osp.exists(storage): + accuracy_scores = evaluate_logicvista(storage) + score_pth = storage.replace('.xlsx', '_score.csv') + dump(accuracy_scores, score_pth) + + return accuracy_scores + +class MME_CoT(ImageBaseDataset): + TYPE = 'VQA' + DATASET_URL = { + 'MME_CoT_TEST': 'https://huggingface.co/datasets/CaraJ/MME-CoT_VLMEvalKit/resolve/main/MME-CoT.tsv' # noqa + } + DATASET_MD5 = { + 'MME_CoT_TEST': 'a612dee0f2d702e01fe50267201302e0', + } + + def split_MME_CoT(self, msgs): + text, images = None, [] + + # Separate images and text from msgs + for s in msgs: + if s['type'] == 'image': + images.append(s['value']) + elif s['type'] == 'text': + assert text is None # Ensure only one text entry is expected + text = s['value'] + + # Split text by tags + text_segs = text.split('') + + # Initialize the segments list + segs = [] + # Iterate through 
the text segments and images + for i, seg in enumerate(text_segs): + # Append the image if this is not the first segment and there are still images left + if i > 0 and i - 1 < len(images): + segs.append(dict(type='image', value=images[i - 1])) + # Append the text segment (if it's non-empty) + if len(seg.strip()) > 0: + segs.append(dict(type='text', value=seg)) + + return segs + + def dump_image(self, line): + os.makedirs(self.img_root, exist_ok=True) + + if 'image' in line: + if isinstance(line['image'], list): + tgt_path = [] + if 'image_path' in line: + image_path_list = line['image_path'] + else: + image_path_list = [f"{line['index']}--{i + 1}.jpg" for i in range(len(line['image']))] + for img, im_name in zip(line['image'], image_path_list): + path = osp.join(self.img_root, im_name) + if not read_ok(path): + decode_base64_to_image_file(img, path) + tgt_path.append(path) + else: + tgt_path = osp.join(self.img_root, f"{line['index']}.jpg") + if not read_ok(tgt_path): + decode_base64_to_image_file(line['image'], tgt_path) + tgt_path = [tgt_path] + else: + assert 'image_path' in line + tgt_path = toliststr(line['image_path']) + + return tgt_path + + def build_prompt(self, line): + + if isinstance(line, int): + line = self.data.iloc[line] + + tgt_path = self.dump_image(line) + + prompt = line['question'] + + options = { + cand: line[cand] + for cand in string.ascii_uppercase + if cand in line and not pd.isna(line[cand]) + } + prompt = prompt + '\n' + '\n'.join([f'{key}. {item}' for key, item in options.items()]) + + # add cot prompt + if os.environ.get('USE_COT_PROMPT', '1') == '1': + prompt += "\nPlease generate a step by step answer, include all your intermediate reasoning process, and provide the final answer at the end." + else: + prompt += "\nPlease directly provide the final answer without any other output." + + msgs = [] + if isinstance(tgt_path, list): + msgs.extend([dict(type='image', value=p) for p in tgt_path]) + else: + msgs = [dict(type='image', value=tgt_path)] + msgs.append(dict(type='text', value=prompt)) + + msgs = self.split_MME_CoT(msgs) + return msgs + + # It returns a DataFrame + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + print("\033[1;31;40m" + "[MME-CoT Evaluation]: Please refer to the official repository for evaluation: https://github.com/CaraJ7/MME-CoT/tree/main" + "\033[0m") + dummy_result = dict( + dummy_result=0 + ) + return pd.DataFrame(dummy_result, index=[0]) + + + +class LLaVABench(ImageBaseDataset): + TYPE = 'VQA' + DATASET_URL = {'LLaVABench': 'https://opencompass.openxlab.space/utils/VLMEval/LLaVABench.tsv'} + DATASET_MD5 = {'LLaVABench': 'd382a093f749a697820d3dadd61c8428'} + + # It returns a DataFrame + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + from .utils.llavabench import ( + build_prompt, + LLaVABench_atomeval, + LLaVABench_score, + ) + + suffix = '.' + eval_file.split('.')[-1] + record_file = eval_file.replace(suffix, '_openai_result' + suffix) + score_file = eval_file.replace(suffix, '_score.csv') + nproc = judge_kwargs.pop('nproc', 4) + system_prompt = 'You are a helpful and precise assistant for checking the quality of the answer.' 
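`MME_CoT.split_MME_CoT` earlier in this hunk interleaves the dumped images with the text segments surrounding each image placeholder in the question. A rough sketch of that interleaving follows; the `<image>` tag name is an assumption made for the example, not necessarily the dataset's actual marker:

```python
# Sketch of interleaving text segments with images around a placeholder tag.
# The '<image>' marker here is assumed for illustration purposes.

def interleave(text: str, images: list, tag: str = '<image>'):
    segs = []
    parts = text.split(tag)
    for i, part in enumerate(parts):
        if i > 0 and i - 1 < len(images):
            segs.append(dict(type='image', value=images[i - 1]))
        if part.strip():
            segs.append(dict(type='text', value=part))
    return segs


print(interleave('Compare <image> with <image>. Which is larger?',
                 ['img_1.jpg', 'img_2.jpg']))
```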
+ + if not osp.exists(record_file): + data = load(eval_file) + lines = [data.iloc[i] for i in range(len(data))] + model = build_judge(temperature=0.2, system_prompt=system_prompt, **judge_kwargs) + assert model.working(), ('LLaVABench evaluation requires a working OPENAI API\n' + DEBUG_MESSAGE) + + prompts = [build_prompt(line) for line in lines] + tups = [(model, prompt) for prompt in prompts] + scores = track_progress_rich(LLaVABench_atomeval, tups, nproc=nproc, chunksize=nproc) + data['gpt4_score'] = [x[0] for x in scores] + data['score'] = [x[1] for x in scores] + dump(data, record_file) + + data = load(record_file) + ret = LLaVABench_score(data).round(1) + dump(ret, score_file) + return ret + + +class MMVet(ImageBaseDataset): + TYPE = 'VQA' + DATASET_URL = { + 'MMVet': 'https://opencompass.openxlab.space/utils/VLMEval/MMVet.tsv', + 'MMVet_Hard': 'http://opencompass.openxlab.space/utils/VLMEval/MMVet_Hard.tsv' + } + DATASET_MD5 = {'MMVet': '748aa6d4aa9d4de798306a63718455e3', 'MMVet_Hard': '63a598819a936a2e77c410a78a21ff16'} + + # It returns a DataFrame + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + from .utils.mmvet import MMVet_auxeval, MMVet_acc + + suffix = eval_file.split('.')[-1] + model = judge_kwargs['model'] + storage = eval_file.replace(f'.{suffix}', f'_{model}.xlsx') + tmp_file = eval_file.replace(f'.{suffix}', f'_{model}.pkl') + nproc = judge_kwargs.pop('nproc', 4) + if not osp.exists(storage): + data = load(eval_file) + model = build_judge(max_tokens=3, **judge_kwargs) + assert model.working(), ('MMVet evaluation requires a working OPENAI API\n' + DEBUG_MESSAGE) + + lt = len(data) + lines = [data.iloc[i] for i in range(lt)] + tups = [(model, line) for line in lines] + indices = [line['index'] for line in lines] + + ans = load(tmp_file) if osp.exists(tmp_file) else {} + tups = [x for x, i in zip(tups, indices) if i not in ans] + indices = [i for i in indices if i not in ans] + + if len(indices): + new_results = track_progress_rich( + MMVet_auxeval, + tups, + nproc=nproc, + chunksize=nproc, + keys=indices, + save=tmp_file, + ) + ans = load(tmp_file) + for k, v in zip(indices, new_results): + assert k in ans + assert ans[k]['log'] == v['log'] and ans[k]['score'] == v['score'] + data['score'] = [ans[idx]['score'] for idx in data['index']] + data['log'] = [ans[idx]['log'] for idx in data['index']] + dump(data, storage) + + score, score_fine = MMVet_acc(storage) + score_pth = storage.replace('.xlsx', '_score.csv') + score_fine_pth = storage.replace('.xlsx', '_score_fine.csv') + dump(score, score_pth) + dump(score_fine, score_fine_pth) + return score + + +class MTVQADataset(ImageBaseDataset): + TYPE = 'VQA' + DATASET_URL = {'MTVQA_TEST': 'https://opencompass.openxlab.space/utils/VLMEval/MTVQA_TEST.tsv'} + DATASET_MD5 = {'MTVQA_TEST': 'd87c17dbab934b7cd89c0a3c1c5657f4'} + + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + data = load(eval_file) + assert 'answer' in data and 'prediction' in data and 'category' in data + data['prediction'] = [str(x) for x in data['prediction']] + data['answer'] = [str(x) for x in data['answer']] + if 'split' in data: + assert np.all([x.lower() == 'test' for x in data['split']]), 'We only support MTVQA_TEST for now. 
' + lt = len(data) + category_scores = defaultdict(list) + for i in range(lt): + line = data.iloc[i] + ans = line['answer'].strip().lower().replace('.', '') + pred = line['prediction'].strip().lower().replace('.', '') + cate = line['category'] + score = 1.0 if ans in pred else 0.0 + category_scores[cate].append(score) + category_scores['Average'].append(score) + # Calculate the average score for each category, the score is normalized to [0, 100] + category_averages = {category: np.mean(scores) * 100 for category, scores in category_scores.items()} + + suffix = eval_file.split('.')[-1] + result_file = eval_file.replace(f'.{suffix}', '_acc.json') + dump(category_averages, result_file) + + return category_averages + + # MT-VQA adopts a custom prompt + def build_prompt(self, line): + msgs = super().build_prompt(line) + assert sum([x['type'] == 'text' for x in msgs]) == 1 + for item in msgs: + if item['type'] == 'text': + item['value'] += '\nAnswer the question using a word or phrase in the language of the question.' + return msgs + + +class TableVQABench(ImageBaseDataset): + TYPE = 'VQA' + DATASET_URL = { + 'TableVQABench': 'https://pai-aigc-photog.oss-cn-hangzhou.aliyuncs.com/mentor-vil/datasets/tablevqa-bench.tsv' + } + DATASET_MD5 = {'TableVQABench': '2550adc61bdc82d8e62f3b003de7c62d'} + + from .utils.tablevqabench import FINTABNETQA_PROMPT, VTABFACT_PROMPT, VWTQ_PROMPT + + # It returns a DataFrame + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + import pandas as pd + from .utils.tablevqabench import evaluate_fintabnet, evaluate_tabfact, evaluate_wtq + + data = load(eval_file) + assert 'answer' in data and 'prediction' in data + + data['prediction'] = data['prediction'].str.replace('^Answer: ', '', regex=True) + data_group = dict(tuple(data.groupby('split'))) + eval_result = {'split': [], 'average_scores': []} + for split in ['fintabnetqa', 'vtabfact', 'vwtq', 'vwtq_syn']: + data_split = data_group[split].to_dict(orient='records') + if split == 'fintabnetqa': + split_eval_meta = evaluate_fintabnet(data_split, ['accuracy']) + elif split == 'vtabfact': + split_eval_meta = evaluate_tabfact(data_split, ['accuracy']) + elif split == 'vwtq' or split == 'vwtq_syn': + split_eval_meta = evaluate_wtq(data_split, ['accuracy']) + eval_result['split'].append(split) + eval_result['average_scores'].append(split_eval_meta['average_scores']) + + suffix = eval_file.split('.')[-1] + result_file = eval_file.replace(f'.{suffix}', '_acc.csv') + eval_result = pd.DataFrame(eval_result) + dump(eval_result, result_file) + + return eval_result + + # TableVQABench adopts a custom prompt + def build_prompt(self, line): + msgs = super().build_prompt(line) + assert sum([x['type'] == 'text' for x in msgs]) == 1 + for item in msgs: + if item['type'] == 'text': + if line['split'] == 'fintabnetqa': + item['value'] = self.FINTABNETQA_PROMPT.format_map({'question': item['value']}) + elif line['split'] == 'vtabfact': + item['value'] = self.VTABFACT_PROMPT.format_map({'question': item['value']}) + elif line['split'] == 'vwtq_syn' or line['split'] == 'vwtq': + item['value'] = self.VWTQ_PROMPT.format_map({'question': item['value']}) + return msgs + + +class CustomVQADataset(ImageBaseDataset): + TYPE = 'VQA' + + def load_data(self, dataset): + data_path = osp.join(LMUDataRoot(), f'{dataset}.tsv') + + if file_size(data_path, 'GB') > 1: + local_path = data_path.replace('.tsv', '_local.tsv') + if not osp.exists(local_path) or os.environ.get('FORCE_LOCAL', None): + from ..tools import LOCALIZE + + LOCALIZE(data_path, 
local_path) + data_path = local_path + return load(data_path) + + def evaluate(self, eval_file, **judge_kwargs): + raise NotImplementedError + + +class CRPE(ImageBaseDataset): + TYPE = 'VQA' + DATASET_URL = { + 'CRPE_EXIST': 'https://huggingface.co/datasets/petter12321/crpe_vlmevalkit/resolve/main/CRPE_EXIST.tsv', + 'CRPE_RELATION': 'https://huggingface.co/datasets/petter12321/crpe_vlmevalkit/resolve/main/CRPE_RELATION.tsv' + } + DATASET_MD5 = { + 'CRPE_EXIST': '315584e23ac1ff7f8719ed3b7ad90f08', + 'CRPE_RELATION': 'bad7094cde0b572288f4b119c2d0c656'} + + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + from .utils.crpe import is_correct + # find-image, count-text, find-text, + # infer-choose, count-image, visual-reasoning + score = { + 'exist': 0, + 'subject': 0, + 'predicate': 0, + 'object': 0, + 'total': 0, + } + num = { + 'exist': 0, + 'subject': 0, + 'predicate': 0, + 'object': 0, + 'total': 0, + } + final_score_dict = { + 'exist': 0, + 'subject': 0, + 'predicate': 0, + 'object': 0, + 'total': 0, + } + data = load(eval_file) + lt = len(data) + lines = [data.iloc[i] for i in range(lt)] + for i in tqdm(range(len(lines))): + line = lines[i] + predict = str(line['prediction']) + answers = str(line['answer']) + # print("predict =", predict) + # print("answers =", answers) + category = line['category'] + if is_correct(answers, predict): + score[category] += 1 + score['total'] += 1 + num[category] += 1 + num['total'] += 1 + + for category in ['exist', 'subject', 'predicate', 'object', 'total']: + if num[category] != 0: + final_score_dict[category] = score[category] / num[category] + else: + final_score_dict[category] = None + + score_pth = eval_file.replace('.xlsx', '_score.json') + dump(final_score_dict, score_pth) + return final_score_dict + + def build_prompt(self, line): + ROOT = LMUDataRoot() + msgs = super().build_prompt(line) + for msg in msgs: + if msg['type'] == 'image': + msg['value'] = osp.join(osp.join(ROOT, 'images', self.dataset_name), msg['value']) + return msgs + + +class QSpatial(ImageBaseDataset): + TYPE = 'VQA' + DATASET_URL = { + 'QSpatial_plus': '', + 'QSpatial_scannet': '' + } + + # NOTE: To evaluate Q-Spatial-ScanNet, you need to get the permission from ScanNet website + # Once you get the permission, you can use the helper code here to download and extract necessary images: + # https://github.com/andrewliao11/Q-Spatial-Bench-code?tab=readme-ov-file#for-qspatial_scannet + qspatial_root = "TO_BE_REPLACED_WITH_THE_PATH_TO_QSPATIAL_DATASET" + url = "https://raw.githubusercontent.com/andrewliao11/Q-Spatial-Bench-code/refs/heads/main/prompt_templates/" + + def post_build(self, dataset): + # Download the prompt templates from github + + links = [ + self.url + "system_prompt.txt", + self.url + "spatial_prompt_single.txt", + self.url + "spatial_prompt_steps.txt", + self.url + "standard_prompt.txt", + self.url + "zero_shot_prompt.txt" + ] + with tempfile.TemporaryDirectory() as temp_dir: + for link in links: + tgt_path = os.path.join(temp_dir, link.split("/")[-1]) + os.system(f"wget {link} -O {tgt_path}") + + self.system_prompt = open(os.path.join(temp_dir, "system_prompt.txt")).read() + self._prompt_templates = dict( + spatial_prompt_single=open(os.path.join(temp_dir, "spatial_prompt_single.txt")).read(), + spatial_prompt_steps=open(os.path.join(temp_dir, "spatial_prompt_steps.txt")).read(), + standard_prompt=open(os.path.join(temp_dir, "standard_prompt.txt")).read(), + zero_shot_prompt=open(os.path.join(temp_dir, "zero_shot_prompt.txt")).read(), + ) + + # Given 
one data record, return the built prompt (a multi-modal message), can override + def build_prompt(self, line): + from jinja2.sandbox import SandboxedEnvironment + text_prompt_template = self._prompt_templates["spatial_prompt_single"] + env = SandboxedEnvironment() + text_prompt = env.from_string(text_prompt_template).render(question=line["question"]) + tgt_path = self.dump_image(line) + + msgs = [] + if isinstance(tgt_path, list): + msgs.extend([dict(type='image', value=p) for p in tgt_path]) + else: + msgs = [dict(type='image', value=tgt_path)] + + msgs.append(dict(type='text', value=f"{self.system_prompt}\n{text_prompt}")) + return msgs + + # Given the dataset name, return the dataset as a pandas dataframe, can override + def load_data(self, dataset): + import io + import pandas as pd + from datasets import load_dataset + + hf_dataset = load_dataset("andrewliao11/Q-Spatial-Bench", split=dataset) + df = hf_dataset.to_pandas() + + df.reset_index(drop=True, inplace=True) + df['index'] = df.index + df['answer'] = list(zip(df['answer_value'], df['answer_unit'])) + df = df[['index'] + [col for col in df.columns if col != 'index']] + + if dataset == "QSpatial_scannet": + df = df.drop(columns=["image"]) + df["image"] = [Image.open(os.path.join(self.qspatial_root, image_path)) for image_path in df["image_path"]] + else: + df["image"] = [Image.open(io.BytesIO(image_dict["bytes"])) for image_dict in df["image"]] + + df["image"] = [encode_image_to_base64(image) for image in df["image"]] + return df + + @classmethod + def get_multiplier(self, unit): + + unit = unit.lower() + if unit in ["meters", "meter", "m", "metre", "metres"]: + multiplier = 100 + elif unit in ["centimeters", "centimeter", "cm"]: + multiplier = 1 + elif unit in ["feet", "foot", "ft"]: + multiplier = 30.48 + elif unit in ["inch", "inches", "in"]: + multiplier = 2.54 + elif unit in ["mm"]: + multiplier = 0.1 + else: + print(f"Unknown unit: {unit}") + multiplier = 0. 
+ + return multiplier + + @classmethod + def parse_string(self, input_str): + # Regular expression to match the pattern (number or range, text) + match = re.match(r'\(([\d.-]+), (.+)\)', input_str) + if match: + number_part = match.group(1) + text = match.group(2) + + if '-' in number_part: + start, end = map(float, number_part.split('-')) + number = (start + end) / 2 + else: + number = float(number_part) + + return number * self.get_multiplier(text) + else: + print(f"Unable to parse the input string {input_str}") + return 0 + + @classmethod + def parse_prediction(self, vlm_response): + # Value + pattern = r'scalar{([^}]*)}' + str_inside_scalar_boxes = re.findall(pattern, vlm_response)[-1] + scalar_list = re.findall(r'\d+\.?\d*', str_inside_scalar_boxes) + parsed_scalar = np.array(scalar_list).astype(float).mean() + + # Unit + pattern = r'distance_unit{([^}]*)}' + str_inside_unit_boxes = re.findall(pattern, vlm_response) + parsed_unit = str_inside_unit_boxes[-1] + + pred_value_in_cms = parsed_scalar * self.get_multiplier(parsed_unit) + return pred_value_in_cms + + # It returns a dictionary + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + + data = load(eval_file) + if "model" in judge_kwargs: + from .utils.qspatial import QSpatial_auxeval + + # extract using model + model = judge_kwargs['model'] + suffix = eval_file.split('.')[-1] + storage = eval_file.replace(f'.{suffix}', f'_{model}.xlsx') + tmp_file = eval_file.replace(f'.{suffix}', f'_{model}.pkl') + nproc = judge_kwargs.pop('nproc', 4) + + if not osp.exists(storage): + model = build_judge(max_tokens=128, **judge_kwargs) + + assert model.working(), ('Evaluation requires a working OPENAI API\n' + DEBUG_MESSAGE) + lt = len(data) + lines = [data.iloc[i] for i in range(lt)] + tups = [(model, line) for line in lines] + indices = [line['index'] for line in lines] + + ans = {} + if osp.exists(tmp_file): + ans = load(tmp_file) + tups = [x for x, i in zip(tups, indices) if i not in ans] + indices = [i for i in indices if i not in ans] + + if len(indices): + new_results = track_progress_rich( + QSpatial_auxeval, + tups, + nproc=nproc, + chunksize=nproc, + keys=indices, + save=tmp_file, + ) + ans = load(tmp_file) + for k, v in zip(indices, new_results): + assert k in ans + assert ans[k]['log'] == v['log'] and ans[k]['res'] == v['res'] + + data['res'] = [ans[idx]['res'] for idx in data['index']] + data['log'] = [ans[idx]['log'] for idx in data['index']] + dump(data, storage) + + data = load(storage) + + pred_value_in_cms = [] + for res in data["res"]: + try: + pred_value_in_cms.append(self.parse_string(res)) + except ValueError: + pred_value_in_cms.append(0.) 
+
+            pred_value_in_cms = np.array(pred_value_in_cms) + 1e-8
+        else:
+            # regex parsing
+            pred_value_in_cms = []
+            n_errors_in_parsing = 0
+            for pred in data["prediction"]:
+                try:
+                    parsed_value = self.parse_prediction(pred)
+                except IndexError:
+                    n_errors_in_parsing += 1
+                    parsed_value = 1e-8
+
+                pred_value_in_cms.append(parsed_value)
+
+            print(f"Encountered {n_errors_in_parsing} errors in parsing")
+            pred_value_in_cms = np.array(pred_value_in_cms) + 1e-8
+
+        # Ground truth
+        ground_truth_value_in_cms = []
+        for answer in data["answer"]:
+            value, unit = eval(answer)
+            ground_truth_value_in_cms.append(value * self.get_multiplier(unit))
+        ground_truth_value_in_cms = np.array(ground_truth_value_in_cms) + 1e-8
+
+        # Calculate the score
+        pred_gt = pred_value_in_cms / ground_truth_value_in_cms
+        gt_pred = ground_truth_value_in_cms / pred_value_in_cms
+        delta_2 = np.stack([pred_gt, gt_pred]).max(0) < 2.
+        delta_1_point_5 = np.stack([pred_gt, gt_pred]).max(0) < 1.5
+
+        data["eval_score_delta_2"] = delta_2
+        data["eval_score_delta_1_point_5"] = delta_1_point_5
+
+        final_score_dict = {
+            "delta_2": delta_2.mean(),
+            "delta_1_point_5": delta_1_point_5.mean()
+        }
+        for question_type in set(data["question_type"]):
+            filtered_data = data[data["question_type"] == question_type]
+            delta_2_per_question_type = filtered_data["eval_score_delta_2"].mean()
+            delta_1_point_5_per_question_type = filtered_data["eval_score_delta_1_point_5"].mean()
+            final_score_dict.update({f"{question_type}_delta_2": delta_2_per_question_type})
+            final_score_dict.update({f"{question_type}_delta_1_point_5": delta_1_point_5_per_question_type})
+
+        score_pth = eval_file.replace('.xlsx', '_score.json')
+        dump(final_score_dict, score_pth)
+        return final_score_dict
+
+
+class MMNIAH(ImageBaseDataset):
+    TYPE = 'VQA'
+    DATASET_URL = {
+        'MM_NIAH_VAL':
+            'https://huggingface.co/datasets/petter12321/MM-NIAH-VLMEvalKit/resolve/main/MM_NIAH_VAL.tsv',
+        'MM_NIAH_TEST':
+            ['https://huggingface.co/datasets/petter12321/MM-NIAH-VLMEvalKit/resolve/main/part-aa',
+             'https://huggingface.co/datasets/petter12321/MM-NIAH-VLMEvalKit/resolve/main/part-ab',
+             'https://huggingface.co/datasets/petter12321/MM-NIAH-VLMEvalKit/resolve/main/part-ac',
+             'https://huggingface.co/datasets/petter12321/MM-NIAH-VLMEvalKit/resolve/main/part-ad',
+             'https://huggingface.co/datasets/petter12321/MM-NIAH-VLMEvalKit/resolve/main/part-ae']}
+    DATASET_MD5 = {'MM_NIAH_VAL': '27e5a8c3cef7746cb38f89cd86c474c5',
+                   'MM_NIAH_TEST': 'f490eb2a43096307465fe9e7ef13497c'}
+
+    def prepare_tsv(self, url, file_md5=None):
+        import os
+        data_root = LMUDataRoot()
+        os.makedirs(data_root, exist_ok=True)
+        update_flag = False
+        file_name = 'MM_NIAH_VAL.tsv' if 'MM_NIAH_VAL' in url else 'MM_NIAH_TEST.tsv'
+        data_path = osp.join(data_root, file_name)
+        if osp.exists(data_path) and (file_md5 is None or md5(data_path) == file_md5):
+            pass
+        elif file_name == 'MM_NIAH_TEST.tsv':
+            warnings.warn('The dataset tsv is not downloaded')
+            for i in range(len(url)):
+                if osp.exists(osp.join(data_root, 'part-a' + chr(ord('a') + i))):
+                    print('part-a' + chr(ord('a') + i) + ' already exists')
+                    continue
+                download_file(url[i], data_path)
+            file_prefix = 'part-'
+            output_file = data_path
+            split_files = sorted([f for f in os.listdir(data_root) if f.startswith(file_prefix)])
+            with open(output_file, 'wb') as outfile:
+                # Read each split part in order and append it to the combined output file
+                for filename in split_files:
+                    with open(osp.join(data_root, filename), 'rb') as infile:
+                        outfile.write(infile.read())
+            update_flag = True
+        else:
+            warnings.warn('The dataset tsv is not downloaded')
+            download_file(url, data_path)
+            update_flag = True
+
+        if file_size(data_path, 'GB') > 1:
+            local_path = data_path.replace('.tsv', '_local.tsv')
+            if not osp.exists(local_path) or os.environ.get('FORCE_LOCAL', None) or update_flag:
+                from ..tools import LOCALIZE
+                LOCALIZE(data_path, local_path)
+            data_path = local_path
+        return load(data_path)
+
+    @classmethod
+    def evaluate(self, eval_file, **judge_kwargs):
+        from .utils.mmniah import is_correct
+        # find-image, count-text, find-text,
+        # infer-choose, count-image, visual-reasoning
+        MMNIAH_score = {
+            'count-text': 0,
+            'find-image': 0,
+            'find-text': 0,
+            'infer-choose': 0,
+            'count-image': 0,
+            'visual-reasoning': 0,
+            'total': 0,
+        }
+        MMNIAH_num = {
+            'count-text': 0,
+            'find-image': 0,
+            'find-text': 0,
+            'infer-choose': 0,
+            'count-image': 0,
+            'visual-reasoning': 0,
+            'total': 0,
+        }
+        final_score_dict = {
+            'count-text': 0,
+            'find-image': 0,
+            'find-text': 0,
+            'infer-choose': 0,
+            'count-image': 0,
+            'visual-reasoning': 0,
+            'total': 0,
+        }
+        data = load(eval_file)
+        lt = len(data)
+        lines = [data.iloc[i] for i in range(lt)]
+        for i in tqdm(range(len(lines))):
+            line = lines[i]
+            predict = line['prediction']
+            answers = line['answer']
+            category = line['category']
+            if category in ['visual-reasoning', 'find-image']:
+                answers = int(answers)
+            if is_correct(answers, predict):
+                MMNIAH_score[category] += 1
+                MMNIAH_score['total'] += 1
+            MMNIAH_num[category] += 1
+            MMNIAH_num['total'] += 1
+
+        for category in ['find-image', 'count-text', 'find-text',
+                         'infer-choose', 'count-image', 'visual-reasoning', 'total']:
+            if MMNIAH_num[category] != 0:
+                final_score_dict[category] = MMNIAH_score[category] / MMNIAH_num[category]
+            else:
+                final_score_dict[category] = None
+
+        score_pth = eval_file.replace('.xlsx', '_score.json')
+        dump(final_score_dict, score_pth)
+        return final_score_dict
+
+    def build_prompt(self, line):
+        msgs = super().build_prompt(line)
+        if isinstance(line, int):
+            line = self.data.iloc[line]
+        totalchoice = line['multi-choice options']
+        totalchoice = eval(totalchoice)
+        # find-image, count-text, find-text,
+        # infer-choose, count-image, visual-reasoning
+        context = msgs[-1]['value']
+        context = eval(context)
+        question = context[0] + '\n' + context[1]
+        # tgt_path is the list of all image paths
+        tgt_path = []
+        for i in range(len(msgs) - 1):
+            tgt_path.append(msgs[i]['value'])
+        choices = totalchoice[0]
+        choices_image = totalchoice[1]
+        if choices:
+            for c_idx, c in enumerate(choices):
+                question = f"{question}\n{chr(c_idx + ord('A'))}. {c}"
+            question += "\nAnswer with the option's letter from the given choices directly."
+        elif choices_image:
+            for c_idx in range(len(choices_image)):
+                question = f"{question}\n{chr(c_idx + ord('A'))}. "
+            question += "\nAnswer with the option's letter from the given choices directly."
+        else:
+            question += '\nAnswer the question using a single word or phrase.'
+ question = '' + question + '' + question = question.split('') + if choices_image: + for i in range(len(question) - 5): + question[i] = question[i] + '\n' + for i in range(len(question) - 5, len(question) - 1): + question[i] = question[i] + '' + else: + for i in range(len(question) - 1): + question[i] = question[i] + '\n' + assert len(tgt_path) + 1 == len(question) + context = [] + for i in range(len(tgt_path)): + context.append(question[i]) + context.append(tgt_path[i]) + context.append(question[-1]) + context[0] = context[0][7:] + context[-1] = context[-1][:-5] + msgs = [] + for i in range(len(context)): + if i % 2 == 0: + msgs.append(dict(type='text', value=context[i])) + else: + ROOT = LMUDataRoot() + msgs.append(dict(type='image', value=osp.join(osp.join(ROOT, 'images', self.dataset_name), context[i]))) + for element in msgs: + if element['value'] == '': + msgs.remove(element) + return msgs diff --git a/vlmeval/dataset/image_yorn.py b/vlmeval/dataset/image_yorn.py new file mode 100644 index 0000000000000000000000000000000000000000..46083e6c3b8147901448a8919d20d3e58dfc2b9f --- /dev/null +++ b/vlmeval/dataset/image_yorn.py @@ -0,0 +1,95 @@ +from ..smp import * +from ..utils import * +from .image_base import ImageBaseDataset +from .utils import build_judge, DEBUG_MESSAGE + + +class ImageYORNDataset(ImageBaseDataset): + + TYPE = 'Y/N' + + DATASET_URL = { + 'MME': 'https://opencompass.openxlab.space/utils/VLMEval/MME.tsv', + 'HallusionBench': 'https://opencompass.openxlab.space/utils/VLMEval/HallusionBench.tsv', + 'POPE': 'https://opencompass.openxlab.space/utils/VLMEval/POPE.tsv', + 'AMBER': 'https://huggingface.co/datasets/yifanzhang114/AMBER_base64/resolve/main/AMBER.tsv', + } + + DATASET_MD5 = { + 'MME': 'b36b43c3f09801f5d368627fb92187c3', + 'HallusionBench': '0c23ac0dc9ef46832d7a24504f2a0c7c', + 'POPE': 'c12f5acb142f2ef1f85a26ba2fbe41d5', + 'AMBER': '970d94c0410916166e0a76ba75da7934', + } + + # It returns a dataframe + def evaluate(self, eval_file, **judge_kwargs): + from .utils.yorn import YOrN_Extraction, YOrN_auxeval + from .utils.yorn import default_rating, MME_rating, Hallusion_rating, POPE_rating, AMBER_rating + + dataset = self.dataset_name + data = load(eval_file) + data['prediction'] = [str(x) for x in data['prediction']] + storage = eval_file.replace('.xlsx', '_auxmatch.xlsx') + tmp_file = eval_file.replace('.xlsx', '_tmp.pkl') + nproc = judge_kwargs.pop('nproc', 4) + + if not osp.exists(storage): + ans_map = {k: YOrN_Extraction(v) for k, v in zip(data['index'], data['prediction'])} + if osp.exists(tmp_file): + tmp = load(tmp_file) + for k in tmp: + if ans_map[k] == 'Unknown' and tmp[k] != 'Unknown': + ans_map[k] = tmp[k] + + data['extracted'] = [ans_map[x] for x in data['index']] + unknown = data[data['extracted'] == 'Unknown'] + + model = judge_kwargs.get('model', 'exact_matching') + if model == 'exact_matching': + model = None + elif gpt_key_set(): + model = build_judge(**judge_kwargs) + if not model.working(): + warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation') + warnings.warn(DEBUG_MESSAGE) + model = None + else: + model = None + warnings.warn('OPENAI_API_KEY is not working properly, will use exact matching for evaluation') + + if model is not None: + lt = len(unknown) + lines = [unknown.iloc[i] for i in range(lt)] + tups = [(model, line) for line in lines] + indices = list(unknown['index']) + if len(tups): + res = track_progress_rich( + YOrN_auxeval, tups, nproc=nproc, chunksize=nproc, keys=indices, save=tmp_file) + for k, v 
in zip(indices, res): + ans_map[k] = v + + data['extracted'] = [ans_map[x] for x in data['index']] + dump(data, storage) + + data = load(storage) + if listinstr(['AMBER'], dataset): + data['score'] = (data['answer'].str.lower() == data['extracted'].str.lower()) + else: + data['score'] = (data['answer'] == data['extracted']) + dump(data, storage) + + if dataset is not None and listinstr(['MME'], dataset): + score = MME_rating(storage) + elif dataset is not None and listinstr(['Hallusion'], dataset): + score = Hallusion_rating(storage) + elif dataset is not None and listinstr(['POPE'], dataset): + score = POPE_rating(storage) + elif dataset is not None and listinstr(['AMBER'], dataset): + score = AMBER_rating(storage) + else: + score = default_rating(storage) + + score_tgt = eval_file.replace('.xlsx', '_score.csv') + dump(score, score_tgt) + return score diff --git a/vlmeval/dataset/longvideobench.py b/vlmeval/dataset/longvideobench.py new file mode 100644 index 0000000000000000000000000000000000000000..5204713b1a3f04da2e13daff8b15edfda5233762 --- /dev/null +++ b/vlmeval/dataset/longvideobench.py @@ -0,0 +1,328 @@ +from huggingface_hub import snapshot_download +from ..smp import * +from .video_base import VideoBaseDataset +from .utils import build_judge, DEBUG_MESSAGE +from glob import glob + +FAIL_MSG = 'Failed to obtain answer via API.' + + +def timestamp_to_seconds(timestamp): + # Split the timestamp into hours, minutes, and seconds + h, m, s = timestamp.split(":") + # Convert hours, minutes, and total seconds (including fractions) to float and compute total seconds + total_seconds = int(h) * 3600 + int(m) * 60 + float(s) + return total_seconds + + +def uniformly_subsample(lst, K): + n = len(lst) + if K >= n: + return lst + step = n / K + return [lst[int(i * step)] for i in range(K)] + + +def insert_subtitles_into_frames( + frames, + frame_timestamps, + subtitles, + starting_timestamp_for_subtitles, + duration, +): + interleaved_list = [] + cur_i = 0 + + for subtitle in subtitles: + if "timestamp" in subtitle: + start, end = subtitle["timestamp"] + + if not isinstance(end, float): + end = duration + + start -= starting_timestamp_for_subtitles + end -= starting_timestamp_for_subtitles + + subtitle_timestamp = (start + end) / 2 + subtitle_text = subtitle["text"] + else: + start, end = subtitle["start"], subtitle["end"] + start = timestamp_to_seconds(start) + end = timestamp_to_seconds(end) + start -= starting_timestamp_for_subtitles + end -= starting_timestamp_for_subtitles + + subtitle_timestamp = (start + end) / 2 + subtitle_text = subtitle["line"] + + for i, (frame, frame_timestamp) in enumerate( + zip(frames[cur_i:], frame_timestamps[cur_i:]) + ): + if frame_timestamp <= subtitle_timestamp: + # print("frame:", frame_timestamp) + interleaved_list.append({"type": "image", "value": frame}) + cur_i += 1 + else: + break + + if end - start < 1: + end = subtitle_timestamp + 0.5 + start = subtitle_timestamp - 0.5 + + covering_frames = False + for frame, frame_timestamp in zip(frames, frame_timestamps): + if frame_timestamp < end and frame_timestamp > start: + covering_frames = True + break + + if covering_frames: + interleaved_list.append({"type": "text", "value": subtitle_text + "\n"}) + else: + pass + + for i, (frame, frame_timestamp) in enumerate( + zip(frames[cur_i:], frame_timestamps[cur_i:]) + ): + interleaved_list.append({"type": "image", "value": frame}) + return interleaved_list + + +class LongVideoBench(VideoBaseDataset): + + MD5 = '82905eae3a5ae7383c5a8ee9655e1ab9' + SYS = '' + + 
TYPE = 'Video-MCQ' + + def __init__(self, dataset='LongVideoBench', use_subtitle=False, nframe=0, fps=-1): + super().__init__(dataset=dataset, nframe=nframe, fps=fps) + self.use_subtitle = use_subtitle + self.dataset_name = dataset + + @classmethod + def supported_datasets(cls): + return ['LongVideoBench'] + + def prepare_dataset(self, dataset_name='LongVideoBench', repo_id='longvideobench/LongVideoBench'): + + def check_integrity(pth): + data_file = osp.join(pth, f'{dataset_name}.tsv') + + if not osp.exists(data_file): + return False + + if md5(data_file) != self.MD5: + print("md5 mismatch", md5(data_file), self.MD5) + return False + data = load(data_file) + for video_pth in data['video_path']: + if not osp.exists(osp.join(pth, video_pth)): + print(video_pth, "is not found") + return False + return True + + if modelscope_flag_set(): + repo_id = "AI-ModelScope/LongVideoBench" + + cache_path = get_cache_path(repo_id) + if cache_path is not None and check_integrity(cache_path): + dataset_path = cache_path + else: + def generate_tsv(pth): + data_file = osp.join(pth, f'{dataset_name}.tsv') + if osp.exists(data_file) and md5(data_file) == self.MD5: + return + + data_file = pd.read_json(osp.join(pth, 'lvb_val.json')) + data_file = data_file.assign(index=range(len(data_file))) + data_file['video'] = data_file['video_id'] + data_file['video_path'] = data_file['video_path'].apply(lambda x: f'./videos/{x}') + + data_file.to_csv(osp.join(pth, f'{dataset_name}.tsv'), sep='\t', index=False) + + if modelscope_flag_set(): + from modelscope import dataset_snapshot_download + dataset_snapshot_download(dataset_id=repo_id) + else: + snapshot_download(repo_id=repo_id, repo_type='dataset') + print("All videos are downloaded for LongVideoBench") + + if not glob(osp.join(cache_path, "videos")): + tar_files = glob(osp.join(cache_path, "**/*.tar*"), recursive=True) + + def untar_video_data(tar_file, cache_dir): + import tarfile + with tarfile.open(tar_file, "r") as tar_ref: + tar_ref.extractall(cache_dir) + print(f"Extracted all files from {tar_file} to {cache_dir}") + + def concat_tar_parts(tar_parts, output_tar): + with open(output_tar, "wb") as out_tar: + from tqdm import tqdm + for part in tqdm(sorted(tar_parts)): + with open(part, "rb") as part_file: + out_tar.write(part_file.read()) + print(f"Concatenated parts {tar_parts} into {output_tar}") + + tar_parts_dict = {} + + # Group tar parts together + for tar_file in tar_files: + base_name = tar_file.split(".tar")[0] + if base_name not in tar_parts_dict: + tar_parts_dict[base_name] = [] + tar_parts_dict[base_name].append(tar_file) + + # Concatenate and untar split parts + for base_name, parts in tar_parts_dict.items(): + print(f"Extracting following tar files: {parts}") + output_tar = base_name + ".tar" + if not osp.exists(output_tar): + print('Start concatenating tar files') + + concat_tar_parts(parts, output_tar) + print('Finish concatenating tar files') + + if not osp.exists(osp.join(cache_path, osp.basename(base_name))): + untar_video_data(output_tar, cache_path) + + print('All videos are extracted for LongVideoBench') + + dataset_path = cache_path + generate_tsv(dataset_path) + + data_file = osp.join(dataset_path, f'{dataset_name}.tsv') + + return dict(data_file=data_file, root=dataset_path) + + def save_video_frames(self, video_path, video_llm=False): + + vid_path = osp.join(self.data_root, video_path) + vid = decord.VideoReader(vid_path) + video_info = { + 'fps': vid.get_avg_fps(), + 'n_frames': len(vid), + } + if self.nframe > 0 and self.fps < 0: + 
step_size = len(vid) / (self.nframe + 1) + indices = [int(i * step_size) for i in range(1, self.nframe + 1)] + frame_paths = self.frame_paths(video_path[:-4]) + elif self.fps > 0: + # not constrained by num_frames, get frames by fps + total_duration = video_info['n_frames'] / video_info['fps'] + required_frames = int(total_duration * self.fps) + step_size = video_info['fps'] / self.fps + indices = [int(i * step_size) for i in range(required_frames)] + frame_paths = self.frame_paths_fps(video_path[:-4], len(indices)) + + flag = np.all([osp.exists(p) for p in frame_paths]) + + if not flag: + images = [vid[i].asnumpy() for i in indices] + images = [Image.fromarray(arr) for arr in images] + for im, pth in zip(images, frame_paths): + if not osp.exists(pth) and not video_llm: + im.save(pth) + + return frame_paths, indices, video_info + + # def save_video_into_images(self, line, num_frames=8): + # frame_paths, indices, video_info = self.save_video_frames(line['video_path'], num_frames) + # return frame_paths + + def build_prompt(self, line, video_llm): + if isinstance(line, int): + assert line < len(self) + line = self.data.iloc[line] + + frames, indices, video_info = self.save_video_frames(line['video_path'], video_llm) + fps = video_info["fps"] + + message = [dict(type='text', value=self.SYS)] + if video_llm: + message.append(dict(type='video', value=osp.join(self.data_root, line['video_path']))) + else: + if not self.use_subtitle: + with open(osp.join(self.data_root, "subtitles", line["subtitle_path"])) as f: + subtitles = json.load(f) + + frame_message = insert_subtitles_into_frames( + frames, + [ind_ / fps for ind_ in indices], + subtitles, + line["starting_timestamp_for_subtitles"], + line["duration"] + ) + + message += frame_message + else: + for im in frames: + message.append(dict(type='image', value=im)) + + line['question'] += '\n' + '\n'.join( + ["{}. {}".format(chr(ord("A") + i), cand) for i, cand in enumerate(eval(line['candidates']))] + ) + prompt = line["question"] + "\nAnswer with the option's letter from the given choices directly." 
+ message.append(dict(type='text', value=prompt)) + return message + + # It returns a dictionary + @classmethod + def evaluate(self, eval_file, **judge_kwargs): + from .utils.longvideobench import get_dimension_rating, extract_characters_regex, extract_option + + assert eval_file.endswith('.xlsx'), 'data file should be an xlsx file' + + tmp_file = eval_file.replace('.xlsx', '_tmp.pkl') + tgt_file = eval_file.replace('.xlsx', '_rating.json') + score_file = eval_file.replace('.xlsx', '_score.xlsx') + + if not osp.exists(score_file): + model = judge_kwargs.get('model', 'exact_matching') + assert model in ['chatgpt-0125', 'exact_matching', 'gpt-4-0125'] + + if model == 'exact_matching': + model = None + elif gpt_key_set(): + model = build_judge(**judge_kwargs) + if not model.working(): + warnings.warn('OPENAI API is not working properly, will use exact matching for evaluation') + warnings.warn(DEBUG_MESSAGE) + model = None + else: + warnings.warn('OPENAI_API_KEY is not set properly, will use exact matching for evaluation') + model = None + res = {} if not osp.exists(tmp_file) else load(tmp_file) + res = {k: v for k, v in res.items() if FAIL_MSG not in v} + + data = load(eval_file) + data_un = data[~pd.isna(data['prediction'])] + + for idx in data['index']: + ans = data.loc[data['index'] == idx, 'correct_choice'].values[0] + ans = chr(ord("A") + ans) + pred = str(data.loc[data['index'] == idx, 'prediction'].values[0]) + + if extract_characters_regex(pred) == '': + extract_pred = extract_option( + model, + data.loc[data['index'] == idx].to_dict(orient='records')[0], + 'LongVideoBench' + ) + data.loc[idx, 'score'] = int(extract_pred == ans) + else: + data.loc[idx, 'score'] = int(extract_characters_regex(pred) == ans) + + rejected = [x for x in data['score'] if x == -1] + + print( + f'Among {len(data)} questions, failed to obtain prediction for {len(data) - len(data_un)} questions, ' + f'failed to obtain the score for another {len(rejected)} questions. ' + f'Those questions will be counted as -1 score in ALL rating, and will not be counted in VALID rating.' 
+ ) + + dump(data, score_file) + + rating = get_dimension_rating(score_file) + dump(rating, tgt_file) + return rating diff --git a/vlmeval/dataset/megabench.py b/vlmeval/dataset/megabench.py new file mode 100644 index 0000000000000000000000000000000000000000..6f1831a5049679d3b0824f7e8b4fd14da0361f52 --- /dev/null +++ b/vlmeval/dataset/megabench.py @@ -0,0 +1,435 @@ +import math +import re +import tempfile +import cv2 +from vlmeval.smp import * +from vlmeval.dataset.video_base import VideoBaseDataset +from vlmeval.dataset.utils.megabench.evaluator import MEGABenchEvaluator +import json +import glob + + +class MEGABench(VideoBaseDataset): + TYPE = 'Video-VQA' + ZIP_MD5 = '5ec01ab69cd25b643c4f5e1396e96441' + MODALITY = 'VIDEO' + + def __init__(self, dataset='MEGABench', use_subtitle=False, nframe=0, fps=-1, subset_name="core"): + self.subset_name = subset_name + super().__init__(dataset=dataset, nframe=nframe, fps=fps) + self.use_subtitle = use_subtitle + self.dataset_name = dataset + self.max_num_frames = nframe + self.total_demo_video_frames = nframe / 4 + self.max_side = 1000 + + def _set_sampling_config(self, line): + def count_videos(media_str): + if not media_str or media_str == '[]': + return 0 + try: + media_list = eval(str(media_str)) + num_videos = sum(1 for m in media_list if self.is_video_file(m)) + return num_videos + except: + return 0 + + num_query_videos = 0 + num_demo_videos = 0 + + num_query_videos += count_videos(line['global_media']) + num_demo_videos += count_videos(line['example_media']) + num_query_videos += count_videos(line['query_media']) + + # print("num_query_videos, num_demo_videos:", num_query_videos, num_demo_videos) + + if hasattr(self, 'max_num_frames') and self.max_num_frames: + if num_demo_videos > 0: + self.demo_video_frames = math.ceil( + self.total_demo_video_frames / num_demo_videos + ) if hasattr(self, 'total_demo_video_frames') else 2 + else: + self.demo_video_frames = 0 + + if num_query_videos > 0: + total_query_video_frames = ( + self.max_num_frames + - self.demo_video_frames * num_demo_videos + ) + if total_query_video_frames <= 0: + raise ValueError( + f"Cannot query <= 0 frames: please raise the number of maximum images allowed. 
" + f"demo_video_frames={self.demo_video_frames}, num_demo_videos={num_demo_videos}, " + f"max_num_frames={self.max_num_frames}" + ) + self.query_video_frames = total_query_video_frames // num_query_videos + else: + self.query_video_frames = 0 + + else: + self.demo_video_frames = 2 + self.query_video_frames = 8 + + # print("demo_video_frames, query_video_frames:", self.demo_video_frames, self.query_video_frames) + + def is_video_file(self, file_path): + from mimetypes import guess_type + mime_type, _ = guess_type(file_path) + if not mime_type: + return False + return mime_type.startswith("video") + + @classmethod + def supported_datasets(cls): + return ['MEGABench'] + + def prepare_dataset(self, dataset_name='MEGABench', repo_id='TIGER-Lab/MEGA-Bench'): + def not_integrity(dataset_path): + zip_file = osp.join(dataset_path, 'data.zip') + return self.ZIP_MD5 != md5(zip_file) + + def unzip_hf_zip(pth, hub_pth): + dataset_path = osp.join(pth, 'images') # LMUData/images + os.makedirs(dataset_path, exist_ok=True) + + # 解压到megabench目录 + extract_path = osp.join(dataset_path, 'MEGABench') + if not osp.exists(extract_path): + zip_path = osp.join(hub_pth, 'data.zip') + import zipfile + with zipfile.ZipFile(zip_path, 'r') as zip_ref: + zip_ref.extractall(extract_path) + return extract_path + + def generate_tsv(pth, data_file, dataset, split='test'): + if osp.exists(data_file): + print(f'TSV file already exists at {data_file}') + return + + def process_media_path(media_str, base_path): + if media_str == '[]': + return media_str + try: + media_list = eval(media_str) + media_list = [osp.join(base_path, path.lstrip('./')) for path in media_list] + return str(media_list) + except: + return media_str + + def check_field(field): + if isinstance(field, str): + field = field.replace('\t', ' ') + field = ' '.join(field.split()) + return field + return ' ' + + with open(data_file, 'w', encoding='utf-8') as f: + import csv + writer = csv.writer(f, delimiter='\t', quoting=csv.QUOTE_MINIMAL, + quotechar='"', escapechar='\\') + headers = [ + 'index', 'task_name', 'task_description', 'global_media', + 'example_text', 'example_media', 'question', 'query_media', + 'answer', 'metric_info', 'eval_context','video' + ] + writer.writerow(headers) + + for item in dataset[split]: + global_media = process_media_path(str(item['global_media']), pth) + example_media = process_media_path(str(item['example_media']), pth) + query_media = process_media_path(str(item['query_media']), pth) + row = [ + check_field(str(item['id'])), + check_field(item['task_name']), + check_field(item['task_description']), + check_field(global_media), + check_field(item['example_text']), + check_field(example_media), + check_field(item['query_text']), + check_field(query_media), + check_field(item['answer']), + check_field(item['metric_info']), + check_field(item['eval_context']), + ] + row = [str(field).replace('\t', ' ') for field in row] + f.write('\t'.join(row) + '\n') + + print(f'Generated TSV file at {data_file} with {len(dataset[split])} entries') + + from datasets import load_dataset + dataset = load_dataset(repo_id, self.subset_name) + lmu_root = LMUDataRoot() + dataset_path = get_cache_path(repo_id) + if dataset_path is None or not_integrity(dataset_path): + print(f'download {repo_id} dataset automatically') + from huggingface_hub import snapshot_download + dataset_path = snapshot_download(repo_id=repo_id, repo_type='dataset') + dataset_path = unzip_hf_zip(lmu_root, dataset_path) + data_file_path = osp.join(lmu_root, 
f'{dataset_name}_{self.subset_name}.tsv')
+        generate_tsv(dataset_path, data_file_path, dataset, 'test')
+
+        return dict(data_file=data_file_path, root=dataset_path)
+
+    def build_prompt(self, line, video_llm):
+
+        if isinstance(line, int):
+            assert line < len(self)
+            line = self.data.iloc[line]
+
+        def process_video(file_path, is_demo=False):
+            if video_llm:
+                return (dict(type='video', value=file_path))
+            else:
+                msg = []
+                msg.append(dict(type='text', value=""))
+                msg.extend(_process_video(file_path, is_demo))
+                msg.append(dict(type='text', value=""))
+                return msg
+
+        def _process_video(file_path, is_demo=False):
+            # Open the video file
+            cap = cv2.VideoCapture(file_path)
+            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+            fps = cap.get(cv2.CAP_PROP_FPS)  # Frames per second
+            num_frames = self.demo_video_frames if is_demo else self.query_video_frames
+
+            # the sampling rate using max number of frames
+            sampling_gap_maxframe = (
+                1 if not num_frames else math.ceil(frame_count / num_frames)
+            )
+
+            if fps >= 10:
+                sampling_gap = max(math.ceil(fps / 5), sampling_gap_maxframe)
+            else:
+                sampling_gap = sampling_gap_maxframe
+
+            frame_number = 0
+            msg = []
+            base_path = osp.splitext(file_path)[0]
+            existing_frames = glob.glob(f"{base_path}_frame_*.jpg")
+            for f in existing_frames:
+                try:
+                    os.remove(f)
+                except:
+                    pass
+
+            frame_idx = 0
+            while True:
+                success, frame = cap.read()
+                if not success:
+                    break
+                # Sample frames based on the dynamic sampling rate
+                if frame_number % sampling_gap == 0:
+                    frame_filename = f"{base_path}_frame_{frame_idx:04d}.jpg"
+                    os.makedirs(osp.dirname(frame_filename), exist_ok=True)
+                    cv2.imwrite(frame_filename, frame)
+                    frame_filename = _encode_image(frame_filename)
+                    msg.append(dict(type='image', value=frame_filename))
+                    frame_idx += 1
+                frame_number += 1
+            if frame_number == 0:
+                raise ValueError(f"Failed to read video from {file_path}, check data...")
+            cap.release()
+
+            return msg
+
+        def _encode_image(image_path):
+            original_path = image_path  # strings do not need a deepcopy
+            current_path = image_path  # track the path for the current processing stage
+            image = None
+            rgba_transform = False
+
+            try:
+                # Stage 1: RGBA conversion
+                image = Image.open(current_path)
+                if image.mode == 'RGBA':
+                    try:
+                        background = Image.new("RGBA", image.size, (255, 255, 255, 255))
+                        image = Image.alpha_composite(background, image).convert("RGB")
+                        base_path = osp.splitext(current_path)[0]
+                        current_path = f"{base_path}_rgb.jpg"
+                        image.save(current_path, "JPEG")
+                        print(f'Turn RGBA image into RGB mode, stored to {current_path}')
+                        rgba_transform = True
+                    except Exception as e:
+                        print(f"Warning: Failed to convert RGBA image {current_path}: {e}")
+                        # Continue processing with the original image
+                        image = Image.open(original_path)
+
+                if rgba_transform:
+                    original_path = current_path
+
+                # Stage 2: resizing
+                resize_scale = self.max_side / max(image.size)
+                if resize_scale < 1:
+                    try:
+                        new_size = (int(image.size[0] * resize_scale), int(image.size[1] * resize_scale))
+                        image = image.resize(new_size)
+                        base_path = osp.splitext(current_path)[0]
+                        current_path = f"{base_path}_resize.jpg"
+                        image.save(current_path)
+                        print(f'Resized image, stored to {current_path}')
+                    except Exception as e:
+                        print(f"Warning: Failed to resize image {current_path}: {e}")
+                        return original_path  # return the pre-resize path (possibly the RGB-converted one)
+
+                return current_path
+
+            except Exception as e:
+                print(f"Warning: Critical error processing image {original_path}: {e}")
+                return original_path  # any critical error falls back to the original path
+
+        def create_media_content(file_path, is_demo=False):
+            if self.is_video_file(file_path):
+                # Handle video processing with
the frame subsampling logic + return process_video(file_path, is_demo) + else: + # Handle image processing otherwise + return (dict(type='image', value=_encode_image(file_path))) + + def process_media_list(media_str): + if not media_str or media_str == '[]': + return None + try: + if not isinstance(media_str, str): + media_str = str(media_str) + media_list = eval(media_str) + if isinstance(media_list, list): + return media_list + return None + except: + return None + + def process_text_and_media(text, media_list, is_demo=False): + if not media_list: + return [dict(type='text', value=text.strip())] + + message = [] + chunks = re.split(r'(|