KurtMica committed on
Commit
d088b76
·
1 Parent(s): 236bb17

ACL 2025 paper citation.

Browse files
Files changed (3) hide show
  1. README.md +2 -0
  2. app.py +1 -1
  3. src/about.py +59 -22
README.md CHANGED
@@ -7,6 +7,8 @@ sdk: gradio
7
  app_file: app.py
8
  pinned: true
9
  license: apache-2.0
 
 
10
  short_description: Evaluation of language models on Maltese tasks
11
  sdk_version: 5.19.0
12
  ---
 
7
  app_file: app.py
8
  pinned: true
9
  license: apache-2.0
10
+ tags:
11
+ - leaderboard
12
  short_description: Evaluation of language models on Maltese tasks
13
  sdk_version: 5.19.0
14
  ---
app.py CHANGED
@@ -100,7 +100,7 @@ def init_leaderboard(dataframe):
100
  demo = gr.Blocks(css=custom_css)
101
  with demo:
102
  gr.HTML(TITLE)
103
- gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
104
 
105
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
106
  with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
 
100
  demo = gr.Blocks(css=custom_css)
101
  with demo:
102
  gr.HTML(TITLE)
103
+ gr.HTML(INTRODUCTION_TEXT, elem_classes="markdown-text")
104
 
105
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
106
  with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
src/about.py CHANGED
@@ -1,3 +1,5 @@
 
 
1
  from dataclasses import dataclass
2
  from enum import Enum
3
 
@@ -18,6 +20,7 @@ class Task:
18
  benchmark: str
19
  metric: str
20
  col_name: str
 
21
  task_type: TaskType
22
  is_primary_metric: bool = True
23
 
@@ -26,22 +29,22 @@ class Task:
26
  # ---------------------------------------------------
27
  class Tasks(Enum):
28
  # task_key in the json file, metric_key in the json file, name to display in the leaderboard
29
- task0 = Task("sentiment_mlt", "f1", "Sentiment Analysis (F1)", TaskType.NLU)
30
- task1 = Task("sib200_mlt", "f1", "SIB200 (F1)", TaskType.NLU)
31
- task2 = Task("taxi1500_mlt", "f1", "Taxi1500 (F1)", TaskType.NLU)
32
- task3 = Task("maltese_news_categories", "loglikelihood", "Maltese News Categories (F1)", TaskType.NLU)
33
- task4 = Task("multieurlex_mlt", "loglikelihood", "MultiEURLEX (F1)", TaskType.NLU)
34
- task5 = Task("belebele_mlt", "acc", "Belebele (Accuracy)", TaskType.NLU)
35
- task6 = Task("opus100_eng-mlt", "bleu", "OPUS-100 EN→MT (BLEU)", TaskType.NLG, False)
36
- task7 = Task("opus100_eng-mlt", "chrf", "OPUS-100 EN→MT (ChrF)", TaskType.NLG)
37
- task8 = Task("flores200_eng-mlt", "bleu", "Flores-200 EN→MT (BLEU)", TaskType.NLG, False)
38
- task9 = Task("flores200_eng-mlt", "chrf", "Flores-200 EN→MT (ChrF)", TaskType.NLG)
39
- task10 = Task("webnlg_mlt", "chrf", "WebNLG (ChrF)", TaskType.NLG)
40
- task11 = Task("webnlg_mlt", "rouge", "WebNLG (Rouge-L)", TaskType.NLG, False)
41
- task12 = Task("eurlexsum_mlt", "chrf", "EUR-Lex-Sum (ChrF)", TaskType.NLG, False)
42
- task13 = Task("eurlexsum_mlt", "rouge", "EUR-Lex-Sum (Rouge-L)", TaskType.NLG)
43
- task14 = Task("maltese_news_headlines", "chrf", "Maltese News Headlines (ChrF)", TaskType.NLG, False)
44
- task15 = Task("maltese_news_headlines", "rouge", "Maltese News Headlines (Rouge-L)", TaskType.NLG)
45
 
46
  NUM_FEWSHOT = 0 # Change with your few shot
47
  # ---------------------------------------------------
@@ -49,20 +52,36 @@ NUM_FEWSHOT = 0 # Change with your few shot
49
 
50
 
51
  # Your leaderboard name
52
- TITLE = """<h1 align="center" id="space-title">🇲🇹 MELABench Leaderboard</h1>"""
 
 
 
 
 
53
 
54
  # What does your leaderboard evaluate?
55
  INTRODUCTION_TEXT = """
56
- A Maltese Evaluation Language Benchmark
57
  """
58
 
59
  # Which evaluations are you running? how can people reproduce what you have?
 
 
 
60
  LLM_BENCHMARKS_TEXT = f"""
61
- ## How it works
62
-
63
- ## Reproducibility
64
- To reproduce our results, here is the commands you can run:
 
 
 
 
 
 
 
65
 
 
66
  """
67
 
68
  EVALUATION_QUEUE_TEXT = """
@@ -73,4 +92,22 @@ In addition, we ask for additional metadata about model training.
73
 
74
  CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
75
  CITATION_BUTTON_TEXT = r"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
  """
 
1
+ import re
2
+
3
  from dataclasses import dataclass
4
  from enum import Enum
5
 
 
20
  benchmark: str
21
  metric: str
22
  col_name: str
23
+ url: str
24
  task_type: TaskType
25
  is_primary_metric: bool = True
26
 
 
29
  # ---------------------------------------------------
30
  class Tasks(Enum):
31
  # task_key in the json file, metric_key in the json file, name to display in the leaderboard
32
+ task0 = Task("sentiment_mlt", "f1", "Sentiment Analysis (F1)", "https://github.com/jerbarnes/typology_of_crosslingual/tree/master/data/sentiment/mt", TaskType.NLU)
33
+ task1 = Task("sib200_mlt", "f1", "SIB200 (F1)", "https://huggingface.co/datasets/Davlan/sib200/viewer/mlt_Latn", TaskType.NLU)
34
+ task2 = Task("taxi1500_mlt", "f1", "Taxi1500 (F1)", "https://github.com/cisnlp/Taxi1500", TaskType.NLU)
35
+ task3 = Task("maltese_news_categories", "loglikelihood", "Maltese News Categories (F1)", "https://huggingface.co/datasets/MLRS/maltese_news_categories", TaskType.NLU)
36
+ task4 = Task("multieurlex_mlt", "loglikelihood", "MultiEURLEX (F1)", "https://huggingface.co/datasets/nlpaueb/multi_eurlex", TaskType.NLU)
37
+ task5 = Task("belebele_mlt", "acc", "Belebele (Accuracy)", "https://huggingface.co/datasets/facebook/belebele/viewer/mlt_Latn", TaskType.NLU)
38
+ task6 = Task("opus100_eng-mlt", "bleu", "OPUS-100 EN→MT (BLEU)", "https://huggingface.co/datasets/MLRS/OPUS-MT-EN-Fixed", TaskType.NLG, False)
39
+ task7 = Task("opus100_eng-mlt", "chrf", "OPUS-100 EN→MT (ChrF)", "https://huggingface.co/datasets/MLRS/OPUS-MT-EN-Fixed", TaskType.NLG)
40
+ task8 = Task("flores200_eng-mlt", "bleu", "Flores-200 EN→MT (BLEU)", "https://huggingface.co/datasets/Muennighoff/flores200", TaskType.NLG, False)
41
+ task9 = Task("flores200_eng-mlt", "chrf", "Flores-200 EN→MT (ChrF)", "https://huggingface.co/datasets/Muennighoff/flores200", TaskType.NLG)
42
+ task10 = Task("webnlg_mlt", "chrf", "WebNLG (ChrF)", "https://synalp.gitlabpages.inria.fr/webnlg-challenge/challenge_2023/", TaskType.NLG)
43
+ task11 = Task("webnlg_mlt", "rouge", "WebNLG (Rouge-L)", "https://synalp.gitlabpages.inria.fr/webnlg-challenge/challenge_2023/", TaskType.NLG, False)
44
+ task12 = Task("eurlexsum_mlt", "chrf", "EUR-Lex-Sum (ChrF)", "https://huggingface.co/datasets/dennlinger/eur-lex-sum", TaskType.NLG, False)
45
+ task13 = Task("eurlexsum_mlt", "rouge", "EUR-Lex-Sum (Rouge-L)", "https://huggingface.co/datasets/dennlinger/eur-lex-sum", TaskType.NLG)
46
+ task14 = Task("maltese_news_headlines", "chrf", "Maltese News Headlines (ChrF)", "https://huggingface.co/datasets/MLRS/maltese_news_headlines", TaskType.NLG, False)
47
+ task15 = Task("maltese_news_headlines", "rouge", "Maltese News Headlines (Rouge-L)", "https://huggingface.co/datasets/MLRS/maltese_news_headlines", TaskType.NLG)
48
 
49
  NUM_FEWSHOT = 0 # Change with your few shot
50
  # ---------------------------------------------------
 
52
 
53
 
54
  # Your leaderboard name
55
+ TITLE = """
56
+ <h1 align="center" id="space-title">
57
+ <img src="https://raw.githubusercontent.com/MLRS/MELABench/refs/heads/main/logo.jpg" alt="MELABench logo" width="200px">
58
+ Leaderboard
59
+ </h1>
60
+ """
61
 
62
  # What does your leaderboard evaluate?
63
  INTRODUCTION_TEXT = """
64
+ <p align="center">A Maltese Evaluation Language Benchmark 🇲🇹</p>
65
  """
66
 
67
  # Which evaluations are you running? how can people reproduce what you have?
68
+ tasks = {task_type.value.display_name: {} for task_type in TaskType}
69
+ for task in Tasks:
70
+ tasks[task.value.task_type.value.display_name][re.sub(r" \(.*\)$", "", task.value.col_name)] = task.value.url
71
  LLM_BENCHMARKS_TEXT = f"""
72
+ MELABench evaluates language model capabilities on Maltese.
73
+ Currently, the following tasks are supported:
74
+ """ + \
75
+ "\n".join([
76
+ f"- {task_type}:\n" + "\n".join(f" - [{task}]({url})" for task, url in sub_tasks.items()) + "\n"
77
+ for task_type, sub_tasks in tasks.items()
78
+ ]) + \
79
+ """
80
+ The leaderboard is developed and maintained by people managing [MLRS](https://mlrs.research.um.edu.mt/).
81
+ We plan to expand our initial work with more tasks; if you would like to contribute your data, please reach out!
82
+ If you would like to see results for models/setups we did not cover, we also accept submissions.
83
 
84
+ This work was introduced in [MELABenchv1: Benchmarking Large Language Models against Smaller Fine-Tuned Models for Low-Resource Maltese NLP](https://arxiv.org/abs/2506.04385).
85
  """
86
 
87
  EVALUATION_QUEUE_TEXT = """
 
92
 
93
  CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
94
  CITATION_BUTTON_TEXT = r"""
95
+ @inproceedings{micallef-borg-2025-melabenchv1,
96
+ title = "{MELAB}enchv1: Benchmarking Large Language Models against Smaller Fine-Tuned Models for Low-Resource {M}altese {NLP}",
97
+ author = "Micallef, Kurt and
98
+ Borg, Claudia",
99
+ editor = "Che, Wanxiang and
100
+ Nabende, Joyce and
101
+ Shutova, Ekaterina and
102
+ Pilehvar, Mohammad Taher",
103
+ booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
104
+ month = jul,
105
+ year = "2025",
106
+ address = "Vienna, Austria",
107
+ publisher = "Association for Computational Linguistics",
108
+ url = "https://aclanthology.org/2025.findings-acl.1053/",
109
+ doi = "10.18653/v1/2025.findings-acl.1053",
110
+ pages = "20505--20527",
111
+ ISBN = "979-8-89176-256-5",
112
+ }
113
  """