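"""Gradio app for the Part LLM Leaderboard.

Displays the Persian and Base leaderboards from local JSON files and provides a
submission tab that writes eval requests to a Hugging Face dataset repo and
mirrors them to a Google Form.
"""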
import gradio as gr
from gradio_leaderboard import Leaderboard
from pathlib import Path
import pandas as pd
import os
import json
import requests
from envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
from utils import LLM_BENCHMARKS_ABOUT_TEXT, LLM_BENCHMARKS_SUBMIT_TEXT, custom_css

def fill_form(model_name, model_id, contact_email, challenge, submission_id, paper_link, architecture, license):
    """Map the submission fields onto the entry IDs of the Google Form used to collect submissions."""
    value = {
        # Model name
        "entry.1591601824": model_name,
        # username/space
        "entry.1171388028": model_id,
        # Submission ID on CMT
        "entry.171528970": submission_id,
        # Preprint or paper link
        "entry.1284338508": paper_link,
        # Model architecture
        "entry.1291571256": architecture,
        # License
        # Option: any text
        "entry.272554778": license,
        # Challenge
        # Option: any text
        "entry.1908975677": challenge,
        # Email
        # Option: any text
        "entry.964644151": contact_email,
    }
    return value

def sendForm(url, data):
    try:
        # Post the prefilled form data and fail loudly on HTTP errors
        response = requests.post(url, data=data, timeout=30)
        response.raise_for_status()
        print("Submitted successfully!")
    except requests.RequestException as e:
        print(f"Error submitting the form: {e}")

def submit(model_name, model_id, contact_email, challenge, submission_id, paper_link, architecture, license):
    if model_name == "" or model_id == "" or challenge == "" or architecture == "" or license == "":
        raise gr.Error("Please fill in all the fields")
    if submission_id == "" and paper_link == "":
        raise gr.Error("Provide either a link to a paper describing the method or a submission ID for the MLSB workshop.")
    try:
        user_name = ""
        model_path = model_id
        if "/" in model_id:
            user_name = model_id.split("/")[0]
            model_path = model_id.split("/")[1]

        eval_entry = {
            "model_name": model_name,
            "model_id": model_id,
            "challenge": challenge,
            "submission_id": submission_id,
            "architecture": architecture,
            "license": license,
        }

        # Write the eval request locally before uploading it to the queue repo
        OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
        os.makedirs(OUT_DIR, exist_ok=True)
        out_path = f"{OUT_DIR}/{user_name}_{model_path}.json"

        with open(out_path, "w") as f:
            f.write(json.dumps(eval_entry))

        print("Sending form")
        formData = fill_form(model_name, model_id, contact_email, challenge, submission_id, paper_link, architecture, license)
        sendForm("https://docs.google.com/forms/d/e/1FAIpQLSf1zP7RAFC5RLlva03xm0eIAPLKXOmMvNUzirbm82kdCUFKNw/formResponse", formData)

        print("Uploading eval file")
        API.upload_file(
            path_or_fileobj=out_path,
            path_in_repo=out_path.split("eval-queue/")[1],
            repo_id=QUEUE_REPO,
            repo_type="dataset",
            commit_message=f"Add {model_name} to eval queue",
        )
        gr.Info("Successfully submitted", duration=10)
        # Remove the local file
        os.remove(out_path)
    except Exception as e:
        print(f"Submission failed: {e}")
        raise gr.Error("Error submitting the model")

abs_path = Path(__file__).parent
# Any pandas-compatible data
persian_df = pd.read_json(str(abs_path / "leaderboard_persian.json"))
base_df = pd.read_json(str(abs_path / "leaderboard_base.json"))
with gr.Blocks(css=custom_css) as demo:
    gr.Markdown("""
# Part LLM Leaderboard
""")

    with gr.Tab("Persian Leaderboard"):
        gr.Markdown("""## Persian LLM Leaderboard
Evaluating Persian fine-tuned models
""")
        Leaderboard(
            value=persian_df,
            select_columns=["Model", "Precision", "#Params (B)", "Part Multiple Choice", "ARC Easy", "ARC Challenging", "MMLU Pro", "GSM8k Persian", "Multiple Choice Persian"],
            search_columns=["model_name_for_query"],
            hide_columns=["model_name_for_query"],
            filter_columns=["Precision", "#Params (B)"],
        )
    with gr.Tab("Base Leaderboard"):
        gr.Markdown("""## Base LLM Leaderboard
Evaluating base models
""")
        Leaderboard(
            value=base_df,
            select_columns=["Model", "Precision", "#Params (B)", "Part Multiple Choice", "ARC Easy", "ARC Challenging", "MMLU Pro", "GSM8k Persian", "Multiple Choice Persian"],
            search_columns=["model_name_for_query"],
            hide_columns=["model_name_for_query"],
            filter_columns=["Precision", "#Params (B)"],
        )
    with gr.Tab("About"):
        gr.Markdown(LLM_BENCHMARKS_ABOUT_TEXT)
    with gr.Tab("Submit"):
        gr.Markdown(LLM_BENCHMARKS_SUBMIT_TEXT)
        model_name = gr.Textbox(label="Model name")
        model_id = gr.Textbox(label="username/space, e.g. mlsb/alphafold3")
        contact_email = gr.Textbox(label="Contact e-mail")
        challenge = gr.Radio(choices=["Persian", "Base"], label="Challenge")
        gr.Markdown("Either give a submission ID if you submitted to the MLSB workshop, or provide a link to the preprint/paper describing the method.")
        with gr.Row():
            submission_id = gr.Textbox(label="Submission ID on CMT")
            paper_link = gr.Textbox(label="Preprint or paper link")
        architecture = gr.Dropdown(choices=["GNN", "CNN", "Diffusion Model", "Physics-based", "Other"], label="Model architecture")
        license = gr.Dropdown(choices=["mit", "apache-2.0", "gplv2", "gplv3", "lgpl", "mozilla", "bsd", "other"], label="License")
        submit_btn = gr.Button("Submit")
        submit_btn.click(
            submit,
            inputs=[model_name, model_id, contact_email, challenge, submission_id, paper_link, architecture, license],
            outputs=[],
        )
    gr.Markdown("""
Please find more information about the challenges on [mlsb.io/#challenge](https://mlsb.io/#challenge)""")

if __name__ == "__main__":
    demo.launch()