|
import re
from dataclasses import dataclass
from enum import Enum


@dataclass
class TaskDetails:
    name: str
    display_name: str = ""
    symbol: str = ""


class TaskType(Enum):
    NLU = TaskDetails("nlu", "NLU", "🧠")
    NLG = TaskDetails("nlg", "NLG", "✍️")
|
|
@dataclass
class Task:
    benchmark: str        # internal benchmark identifier
    metric: str           # metric key (e.g. "f1", "bleu", "chrf", "rouge")
    col_name: str         # column name shown on the leaderboard
    url: str              # link to the underlying dataset/task
    task_type: TaskType
    is_primary_metric: bool = True
|
|
class Tasks(Enum):
    task0 = Task("sentiment_mlt", "f1", "Sentiment Analysis (F1)", "https://github.com/jerbarnes/typology_of_crosslingual/tree/master/data/sentiment/mt", TaskType.NLU)
    task1 = Task("sib200_mlt", "f1", "SIB200 (F1)", "https://huggingface.co/datasets/Davlan/sib200/viewer/mlt_Latn", TaskType.NLU)
    task2 = Task("taxi1500_mlt", "f1", "Taxi1500 (F1)", "https://github.com/cisnlp/Taxi1500", TaskType.NLU)
    task3 = Task("maltese_news_categories", "loglikelihood", "Maltese News Categories (F1)", "https://huggingface.co/datasets/MLRS/maltese_news_categories", TaskType.NLU)
    task4 = Task("multieurlex_mlt", "loglikelihood", "MultiEURLEX (F1)", "https://huggingface.co/datasets/nlpaueb/multi_eurlex", TaskType.NLU)
    task5 = Task("belebele_mlt", "acc", "Belebele (Accuracy)", "https://huggingface.co/datasets/facebook/belebele/viewer/mlt_Latn", TaskType.NLU)
    task6 = Task("opus100_eng-mlt", "bleu", "OPUS-100 EN→MT (BLEU)", "https://huggingface.co/datasets/MLRS/OPUS-MT-EN-Fixed", TaskType.NLG, False)
    task7 = Task("opus100_eng-mlt", "chrf", "OPUS-100 EN→MT (ChrF)", "https://huggingface.co/datasets/MLRS/OPUS-MT-EN-Fixed", TaskType.NLG)
    task8 = Task("flores200_eng-mlt", "bleu", "Flores-200 EN→MT (BLEU)", "https://huggingface.co/datasets/Muennighoff/flores200", TaskType.NLG, False)
    task9 = Task("flores200_eng-mlt", "chrf", "Flores-200 EN→MT (ChrF)", "https://huggingface.co/datasets/Muennighoff/flores200", TaskType.NLG)
    task10 = Task("webnlg_mlt", "chrf", "WebNLG (ChrF)", "https://synalp.gitlabpages.inria.fr/webnlg-challenge/challenge_2023/", TaskType.NLG)
    task11 = Task("webnlg_mlt", "rouge", "WebNLG (Rouge-L)", "https://synalp.gitlabpages.inria.fr/webnlg-challenge/challenge_2023/", TaskType.NLG, False)
    task12 = Task("eurlexsum_mlt", "chrf", "EUR-Lex-Sum (ChrF)", "https://huggingface.co/datasets/dennlinger/eur-lex-sum", TaskType.NLG, False)
    task13 = Task("eurlexsum_mlt", "rouge", "EUR-Lex-Sum (Rouge-L)", "https://huggingface.co/datasets/dennlinger/eur-lex-sum", TaskType.NLG)
    task14 = Task("maltese_news_headlines", "chrf", "Maltese News Headlines (ChrF)", "https://huggingface.co/datasets/MLRS/maltese_news_headlines", TaskType.NLG, False)
    task15 = Task("maltese_news_headlines", "rouge", "Maltese News Headlines (Rouge-L)", "https://huggingface.co/datasets/MLRS/maltese_news_headlines", TaskType.NLG)
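    # Generation benchmarks appear twice above, once per reported metric: the
    # entry that leaves is_primary_metric at its default (True) marks that
    # benchmark's primary metric, while entries constructed with False are
    # secondary.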
|
|
|
NUM_FEWSHOT = 0  # number of in-context (few-shot) examples; 0 means zero-shot evaluation
|
|
TITLE = """ |
|
<h1 align="center" id="space-title"> |
|
<img src="https://raw.githubusercontent.com/MLRS/MELABench/refs/heads/main/logo.jpg" alt="MELABench logo" width="200px"> |
|
Leaderboard |
|
</h1> |
|
""" |
|
|
|
|
|
INTRODUCTION_TEXT = """ |
|
<p align="center">A Maltese Evaluation Language Benchmark 🇲🇹</p> |
|
""" |
|
|
|
|
|
# Group the task links by task type, keyed by the task's display name with the
# metric suffix (e.g. " (F1)") stripped, so each benchmark is listed only once
# in the generated Markdown below.
tasks = {task_type.value.display_name: {} for task_type in TaskType}
for task in Tasks:
    tasks[task.value.task_type.value.display_name][re.sub(r" \(.*\)$", "", task.value.col_name)] = task.value.url

LLM_BENCHMARKS_TEXT = f"""
MELABench evaluates language model capabilities on Maltese.
Currently, the following tasks are supported:
""" + \
    "\n".join([
        f"- {task_type}:\n" + "\n".join(f"  - [{task}]({url})" for task, url in sub_tasks.items()) + "\n"
        for task_type, sub_tasks in tasks.items()
    ]) + \
    """
The leaderboard is developed and maintained by the people managing [MLRS](https://mlrs.research.um.edu.mt/).
We plan to expand our initial work with more tasks. If you would like to contribute your data, please reach out!
If you would like to see results for models/setups we did not include, we also accept submissions.

This work was introduced in [MELABenchv1: Benchmarking Large Language Models against Smaller Fine-Tuned Models for Low-Resource Maltese NLP](https://arxiv.org/abs/2506.04385).
"""
|
|
EVALUATION_QUEUE_TEXT = """ |
|
To include new results on this benchmark, follow the instructions on our [GitHub Repository](https://github.com/MLRS/MELABench/tree/main/prompting). |
|
You can then upload the output files which should include the configuration/results file and all the prediction files. |
|
In addition, we ask for additional metadata about model training. |
|
""" |
|
|
|
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
@inproceedings{micallef-borg-2025-melabenchv1,
    title = "{MELAB}enchv1: Benchmarking Large Language Models against Smaller Fine-Tuned Models for Low-Resource {M}altese {NLP}",
    author = "Micallef, Kurt and
      Borg, Claudia",
    editor = "Che, Wanxiang and
      Nabende, Joyce and
      Shutova, Ekaterina and
      Pilehvar, Mohammad Taher",
    booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
    month = jul,
    year = "2025",
    address = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-acl.1053/",
    doi = "10.18653/v1/2025.findings-acl.1053",
    pages = "20505--20527",
    ISBN = "979-8-89176-256-5",
}
"""
|
|