Repoaner committed on
Commit
0b6eee6
·
verified ·
1 Parent(s): da14a20

Update src/about.py

Browse files
Files changed (1) hide show
  1. src/about.py +6 -6
src/about.py CHANGED
@@ -21,15 +21,15 @@ NUM_FEWSHOT = 0 # Change with your few shot
21
 
22
 
23
  # Your leaderboard name
24
- TITLE = """<h1 align="center" id="space-title">MMIE</h1>"""
25
 
26
  # MJB_LOGO = '<img src="" alt="Logo" style="width: 30%; display: block; margin: auto;">'
27
 
28
  # What does your leaderboard evaluate?
29
  INTRODUCTION_TEXT = """
30
- # MMIE: Massive Multimodal Interleaved Comprehension Benchmark for Large Vision-Language Models
31
- We present MMIE, a Massive Multimodal Interleaved understanding Evaluation benchmark, designed for Large Vision-Language Models (LVLMs). MMIE offers a robust framework for evaluating the interleaved comprehension and generation capabilities of LVLMs across diverse fields, supported by reliable automated metrics.
32
- [Website](https://mmie-bench.github.io) | [Code](https://github.com/Lillianwei-h/MMIE) | [Dataset](https://huggingface.co/datasets/MMIE/MMIE) | [Results](https://huggingface.co/spaces/MMIE/Leaderboard) | [Evaluation Model](https://huggingface.co/MMIE/MMIE-Score) | [Paper](https://arxiv.org/abs/2410.10139)
33
  """
34
 
35
  # Which evaluations are you running? how can people reproduce what you have?
@@ -41,8 +41,8 @@ EVALUATION_QUEUE_TEXT = """
41
 
42
  CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
43
  CITATION_BUTTON_TEXT = """
44
- @article{xia2024mmie,
45
- title={MMIE: Massive Multimodal Interleaved Comprehension Benchmark for Large Vision-Language Models},
46
  author={Xia, Peng and Han, Siwei and Qiu, Shi and Zhou, Yiyang and Wang, Zhaoyang and Zheng, Wenhao and Chen, Zhaorun and Cui, Chenhang and Ding, Mingyu and Li, Linjie and Wang, Lijuan and Yao, Huaxiu},
47
  journal={arXiv preprint arXiv:2410.10139},
48
  year={2024}
 
21
 
22
 
23
  # Your leaderboard name
24
+ TITLE = """<h1 align="center" id="space-title">Align-Anything</h1>"""
25
 
26
  # MJB_LOGO = '<img src="" alt="Logo" style="width: 30%; display: block; margin: auto;">'
27
 
28
  # What does your leaderboard evaluate?
29
  INTRODUCTION_TEXT = """
30
+ # Align-Anything: Massive Multimodal Interleaved Comprehension Benchmark for Large Vision-Language Models
31
+ We present Align-Anything, a Massive Multimodal Interleaved understanding Evaluation benchmark, designed for Large Vision-Language Models (LVLMs). Align-Anything offers a robust framework for evaluating the interleaved comprehension and generation capabilities of LVLMs across diverse fields, supported by reliable automated metrics.
32
+ [Website](https://Align-Anything-bench.github.io) | [Code](https://github.com/Lillianwei-h/Align-Anything) | [Dataset](https://huggingface.co/datasets/Align-Anything/Align-Anything) | [Results](https://huggingface.co/spaces/Align-Anything/Leaderboard) | [Evaluation Model](https://huggingface.co/Align-Anything/Align-Anything-Score) | [Paper](https://arxiv.org/abs/2410.10139)
33
  """
34
 
35
  # Which evaluations are you running? how can people reproduce what you have?
 
41
 
42
  CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
43
  CITATION_BUTTON_TEXT = """
44
+ @article{xia2024Align-Anything,
45
+ title={Align-Anything: Massive Multimodal Interleaved Comprehension Benchmark for Large Vision-Language Models},
46
  author={Xia, Peng and Han, Siwei and Qiu, Shi and Zhou, Yiyang and Wang, Zhaoyang and Zheng, Wenhao and Chen, Zhaorun and Cui, Chenhang and Ding, Mingyu and Li, Linjie and Wang, Lijuan and Yao, Huaxiu},
47
  journal={arXiv preprint arXiv:2410.10139},
48
  year={2024}