Leaderboard / app.py
Jerrycool's picture
Update app.py
0172b15 verified
raw
history blame
21.8 kB
# -*- coding: utf-8 -*-
import gradio as gr
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
# Removed Hugging Face Hub imports as they are not needed for the simplified leaderboard
# --- Make sure these imports work relative to your file structure ---
# Option 1: If src is a directory in the same folder as your script:
# Import the real project modules when available; otherwise fall back to the
# inline placeholder definitions below so the app still runs standalone.
try:
    from src.about import (
        CITATION_BUTTON_LABEL,
        CITATION_BUTTON_TEXT,
        EVALUATION_QUEUE_TEXT,  # Keep if used by commented-out submit tab
        INTRODUCTION_TEXT,
        LLM_BENCHMARKS_TEXT,
        TITLE as ORIGINAL_TITLE,  # Rename original import
    )
    from src.display.css_html_js import custom_css as original_css  # Rename original import
    from src.envs import REPO_ID  # Keep if needed for restart_space or other functions
    from src.submission.submit import add_new_eval  # Keep if using the submit tab
    print("Successfully imported from src module.")
    # Start with original CSS if available; guard against non-string exports.
    custom_css = original_css if isinstance(original_css, str) else ""
    # Use original title if available, otherwise create a default
    TITLE = ORIGINAL_TITLE if isinstance(ORIGINAL_TITLE, str) else "<h1>πŸ† MLE-Dojo Benchmark Leaderboard</h1>"
# Option 2: If you don't have these files, define placeholders (REMOVE THIS if using Option 1)
except ImportError:
    print("Warning: Using placeholder values because src module imports failed.")
    CITATION_BUTTON_LABEL="Citation"
    CITATION_BUTTON_TEXT="Please cite us if you use this benchmark..."
    EVALUATION_QUEUE_TEXT="Current evaluation queue:"
    INTRODUCTION_TEXT="Welcome to the **MLE-Dojo Benchmark Leaderboard**, showcasing the performance of various models across different machine learning tasks. Select a category below to see the rankings based on Elo scores."
    LLM_BENCHMARKS_TEXT="""
## About the Benchmarks
This leaderboard tracks model performance using Elo ratings across several key areas:
* **Overall:** A combined score reflecting performance across all categories.
* **MLE-Lite:** Benchmarks focusing on lightweight machine learning engineering tasks.
* **Tabular:** Performance on tasks involving structured, tabular data.
* **NLP:** Natural Language Processing capabilities.
* **CV:** Computer Vision tasks.
Models are ranked based on their Elo score within each category. Higher scores indicate better relative performance. Click on a model name to visit its associated page (if available).
"""
    # Define an enhanced TITLE with an icon (rendered via gr.HTML below)
    TITLE = """
<div style="display: flex; align-items: center; justify-content: center; margin-bottom: 20px;">
<span style="font-size: 2.5em; margin-right: 15px;">πŸ†</span>
<h1 style="font-size: 2.8em; font-weight: 600; color: #333; margin: 0; line-height: 1.2;">
MLE-Dojo Benchmark Leaderboard
</h1>
</div>
"""
    custom_css="" # Start with empty CSS if not imported
    REPO_ID="your/space-id" # Replace with actual ID if needed
    # Stub for the (currently commented-out) submit tab's handler.
    def add_new_eval(*args): return "Submission placeholder."
# --- End Placeholder Definitions ---
# --- Elo Leaderboard Configuration ---
# Static benchmark results: one entry per model with per-category Elo ratings,
# plus organizer/license metadata and a link used to render the model name.
data = [
    {'model_name': 'gpt-4o-mini', 'url': 'https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/', 'organizer': 'OpenAI', 'license': 'Proprietary', 'MLE-Lite_Elo': 753, 'Tabular_Elo': 839, 'NLP_Elo': 758, 'CV_Elo': 754, 'Overall': 778},
    {'model_name': 'gpt-4o', 'url': 'https://openai.com/index/hello-gpt-4o/', 'organizer': 'OpenAI', 'license': 'Proprietary', 'MLE-Lite_Elo': 830, 'Tabular_Elo': 861, 'NLP_Elo': 903, 'CV_Elo': 761, 'Overall': 841},
    {'model_name': 'o3-mini', 'url': 'https://openai.com/index/openai-o3-mini/', 'organizer': 'OpenAI', 'license': 'Proprietary', 'MLE-Lite_Elo': 1108, 'Tabular_Elo': 1019, 'NLP_Elo': 1056, 'CV_Elo': 1207, 'Overall': 1096},
    {'model_name': 'deepseek-v3', 'url': 'https://api-docs.deepseek.com/news/news1226', 'organizer': 'DeepSeek', 'license': 'DeepSeek', 'MLE-Lite_Elo': 1004, 'Tabular_Elo': 1015, 'NLP_Elo': 1028, 'CV_Elo': 1067, 'Overall': 1023},
    {'model_name': 'deepseek-r1', 'url': 'https://api-docs.deepseek.com/news/news250120', 'organizer': 'DeepSeek', 'license': 'DeepSeek', 'MLE-Lite_Elo': 1137, 'Tabular_Elo': 1053, 'NLP_Elo': 1103, 'CV_Elo': 1083, 'Overall': 1100},
    {'model_name': 'gemini-2.0-flash', 'url': 'https://ai.google.dev/gemini-api/docs/models#gemini-2.0-flash', 'organizer': 'Google', 'license': 'Proprietary', 'MLE-Lite_Elo': 847, 'Tabular_Elo': 923, 'NLP_Elo': 860, 'CV_Elo': 978, 'Overall': 895},
    {'model_name': 'gemini-2.0-pro', 'url': 'https://blog.google/technology/google-deepmind/gemini-model-updates-february-2025/', 'organizer': 'Google', 'license': 'Proprietary', 'MLE-Lite_Elo': 1064, 'Tabular_Elo': 1139, 'NLP_Elo': 1028, 'CV_Elo': 973, 'Overall': 1054},
    {'model_name': 'gemini-2.5-pro', 'url': 'https://deepmind.google/technologies/gemini/pro/', 'organizer': 'Google', 'license': 'Proprietary', 'MLE-Lite_Elo': 1257, 'Tabular_Elo': 1150, 'NLP_Elo': 1266, 'CV_Elo': 1177, 'Overall': 1214},
]

# Master table every category view is derived from.
master_df = pd.DataFrame(data)

# User-facing category choices (with emoji prefix) and the default selection.
CATEGORIES = ["πŸ† Overall", "πŸ’‘ MLE-Lite", "πŸ“Š Tabular", "πŸ’¬ NLP", "πŸ‘οΈ CV"]
DEFAULT_CATEGORY = "πŸ† Overall"

# Emoji-free category name -> master_df column holding that category's Elo.
category_to_column = {
    "Overall": "Overall",          # from "πŸ† Overall"
    "MLE-Lite": "MLE-Lite_Elo",    # from "πŸ’‘ MLE-Lite"
    "Tabular": "Tabular_Elo",      # from "πŸ“Š Tabular"
    "NLP": "NLP_Elo",              # from "πŸ’¬ NLP"
    "CV": "CV_Elo",                # from "πŸ‘οΈ CV"
}


# --- Helper function to update leaderboard ---
def update_leaderboard(category_with_emoji):
    """Build the ranked leaderboard table for one category.

    Strips the emoji prefix from *category_with_emoji*, sorts models by the
    matching Elo column (descending), prepends a 1-based Rank column, and
    renders each model name as an HTML hyperlink. Unknown categories fall
    back to the default category.

    Returns a DataFrame with columns Rank, Model, Organizer, License, Elo Score.
    """
    # "πŸ† Overall" -> "Overall"; a bare name without emoji passes through unchanged.
    category_name = category_with_emoji.split(" ", 1)[-1]
    elo_col = category_to_column.get(category_name)

    if elo_col is None or elo_col not in master_df.columns:
        print(f"Warning: Invalid category '{category_name}' or column '{elo_col}'. Falling back to default.")
        fallback_name = DEFAULT_CATEGORY.split(" ", 1)[-1]
        elo_col = category_to_column[fallback_name]
        # Guard the fallback column too; return an empty, correctly-shaped table.
        if elo_col not in master_df.columns:
            print(f"Error: Default column '{elo_col}' also not found.")
            return pd.DataFrame({
                "Rank": [],
                "Model": [],
                "Elo Score": [],
                "Organizer": [],
                "License": []
            })

    # Slice the needed columns, order by Elo descending, and rank from 1.
    ranked = (
        master_df[['model_name', 'url', 'organizer', 'license', elo_col]]
        .sort_values(by=elo_col, ascending=False)
        .reset_index(drop=True)
    )
    ranked.insert(0, 'Rank', ranked.index + 1)

    # Render each model name as a styled hyperlink ('#' when the URL is missing).
    ranked['Model'] = [
        f"<a href='{url if pd.notna(url) else '#'}' target='_blank' "
        f"style='color: #0056b3; text-decoration: none; font-weight: 500;'>"
        f"{name}</a>"
        for name, url in zip(ranked['model_name'], ranked['url'])
    ]

    # Normalize display headers and drop the raw columns via reordering.
    ranked = ranked.rename(columns={elo_col: 'Elo Score', 'organizer': 'Organizer', 'license': 'License'})
    return ranked[["Rank", "Model", "Organizer", "License", "Elo Score"]]
# --- Mock/Placeholder functions/data for other tabs ---
# (If the Submit tab is used, ensure these variables are appropriately populated or handled)
print("Warning: Evaluation queue data fetching is disabled/mocked due to leaderboard changes.")
# Empty stand-ins for the finished/running/pending evaluation queues that the
# (currently commented-out) submit tab would display.
finished_eval_queue_df = pd.DataFrame(columns=["Model", "Status", "Requested", "Started"])
running_eval_queue_df = pd.DataFrame(columns=["Model", "Status", "Requested", "Started"])
pending_eval_queue_df = pd.DataFrame(columns=["Model", "Status", "Requested", "Started"])
EVAL_COLS = ["Model", "Status", "Requested", "Started"] # Define for the dataframe headers
EVAL_TYPES = ["str", "str", "str", "str"] # Define for the dataframe types
# --- Keep restart function if relevant ---
def restart_space():
    """Log an intended restart of the Hugging Face space (placeholder).

    Swap the print below for a real restart mechanism if needed, e.g.
    HfApi().restart_space(REPO_ID); REPO_ID must be defined/imported by then.
    """
    print("Attempting to restart space: {}".format(REPO_ID))
# --- Enhanced CSS ---
# Stylesheet injected via gr.Blocks(css=...). Rules are scoped by the
# elem_ids assigned below (#category-selector-radio, #leaderboard-table,
# #citation-button) plus a few Gradio structural classes; it is appended
# after any CSS imported from src (see final_css at the bottom).
enhanced_css = """
/* --- Overall Body and Font --- */
body, .gradio-container {
font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif; /* Modern Font Stack */
font-size: 16px !important; /* Base font size */
color: #212529; /* Slightly darker base text for better contrast on lighter bg */
background-color: #f8f9fa; /* Light gray page background */
line-height: 1.6;
}
/* --- Headings --- */
h1, .markdown-text h1 {
font-size: 2.6em !important;
font-weight: 700 !important;
color: #212529;
margin-bottom: 0.7em !important;
line-height: 1.3 !important;
text-align: center;
}
h2, .markdown-text h2 {
font-size: 1.8em !important;
font-weight: 600 !important;
color: #343a40;
margin-top: 1.5em !important;
margin-bottom: 0.8em !important;
border-bottom: 2px solid #dee2e6;
padding-bottom: 0.3em;
}
h3, .markdown-text h3 {
font-size: 1.4em !important;
font-weight: 600 !important;
color: #495057;
margin-top: 1.2em !important;
margin-bottom: 0.6em !important;
}
/* --- Markdown & Text --- */
.markdown-text p {
margin-bottom: 1.2em !important;
font-size: 1.05em !important;
color: #343a40; /* Darker paragraph text */
}
.markdown-text ul, .markdown-text ol {
padding-left: 1.8em !important;
margin-bottom: 1.2em !important;
font-size: 1.05em !important;
}
.markdown-text li {
margin-bottom: 0.6em !important;
}
.markdown-text strong {
font-weight: 600;
color: #0056b3;
}
.markdown-text a {
color: #0056b3;
text-decoration: none;
}
.markdown-text a:hover {
text-decoration: underline;
}
/* --- Gradio Components Styling --- */
.gradio-container {
max-width: 1300px !important; /* Increased max width */
margin: 25px auto !important; /* Center container with margin */
padding: 30px !important; /* Increased padding */
box-shadow: 0 4px 20px rgba(0, 0, 0, 0.07); /* Slightly adjusted shadow */
border-radius: 12px !important; /* Rounded corners */
background-color: #fafafa !important; /* Slightly off-white background */
border: 1px solid #e9ecef; /* Subtle border */
}
/* --- Tabs --- */
.tab-buttons button {
font-size: 1.1em !important;
padding: 12px 20px !important;
border-radius: 8px 8px 0 0 !important;
border: 1px solid #dee2e6 !important;
border-bottom: none !important;
background-color: #f1f3f5 !important; /* Default tab background slightly darker */
color: #495057 !important;
margin-right: 4px !important;
transition: background-color 0.3s ease, color 0.3s ease;
}
.tab-buttons button.selected {
background-color: #fafafa !important; /* Match container background */
color: #0056b3 !important;
font-weight: 600 !important;
border-color: #dee2e6 #dee2e6 #fafafa !important; /* Hide bottom border */
position: relative;
top: 1px;
}
/* --- Radio Buttons (Category Selector) --- */
#category-selector-radio .gr-form {
display: flex;
flex-wrap: wrap;
gap: 12px;
border: 1px solid #ced4da;
padding: 15px;
border-radius: 8px;
background-color: #f1f3f5; /* Match default tab background */
margin-bottom: 25px; /* Increased margin */
}
#category-selector-radio .gr-form .gr-input-label {
display: none;
}
#category-selector-radio .gr-form > div {
flex-grow: 1;
min-width: 150px;
}
#category-selector-radio .gr-form label {
display: block;
padding: 10px 15px;
border: 1px solid #ced4da;
border-radius: 6px;
background-color: #ffffff; /* White buttons */
cursor: pointer;
text-align: center;
transition: background-color 0.3s ease, border-color 0.3s ease, color 0.3s ease;
font-size: 1.05em;
}
#category-selector-radio .gr-form input[type="radio"] {
display: none;
}
#category-selector-radio .gr-form input[type="radio"]:checked + label {
background-color: #0056b3;
color: #ffffff;
border-color: #004494;
font-weight: 600;
}
#category-selector-radio .gr-form label:hover {
background-color: #e9ecef;
border-color: #adb5bd;
}
#category-selector-radio .gr-form input[type="radio"]:checked + label:hover {
background-color: #004a9e;
}
/* --- Dataframe (Leaderboard Table) --- */
#leaderboard-table {
margin-top: 20px;
border: 1px solid #dee2e6;
border-radius: 8px;
overflow: hidden;
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.06); /* Refined shadow */
}
#leaderboard-table .gr-dataframe {
border: none !important;
border-radius: 0 !important;
}
#leaderboard-table table {
width: 100%;
border-collapse: collapse;
table-layout: auto; /* Let browser handle column widths based on content */
}
#leaderboard-table th, #leaderboard-table td {
padding: 16px 18px !important; /* Increased vertical padding */
border: none !important;
border-bottom: 1px solid #e9ecef !important;
text-align: left;
vertical-align: middle;
font-size: 1.1em !important; /* <<< Increased leaderboard font size */
white-space: normal;
/* Removed fixed widths */
}
#leaderboard-table th {
background-color: #f1f3f5 !important; /* Match other subtle backgrounds */
font-weight: 600 !important;
color: #343a40; /* Darker header text */
font-size: 1.15em !important; /* Slightly larger header font */
border-bottom-width: 2px !important;
border-color: #dee2e6 !important;
white-space: nowrap; /* Prevent header text wrapping */
}
#leaderboard-table tr:last-child td {
border-bottom: none !important;
}
#leaderboard-table tr:nth-child(even) {
background-color: #f8f9fa !important; /* Subtle striping slightly darker */
}
#leaderboard-table tr:hover {
background-color: #e9f5ff !important;
}
#leaderboard-table td a {
color: #0056b3;
text-decoration: none;
font-weight: 500;
}
#leaderboard-table td a:hover {
text-decoration: underline;
color: #003d80;
}
#leaderboard-table td:first-child, /* Rank column */
#leaderboard-table th:first-child {
text-align: center;
font-weight: 600;
/* width: auto; Let browser decide width */
}
#leaderboard-table td:last-child, /* Elo Score column */
#leaderboard-table th:last-child {
text-align: right;
font-weight: 600;
white-space: nowrap; /* Prevent Elo score wrapping */
/* width: auto; Let browser decide width */
}
/* --- Accordion --- */
.gradio-accordion {
border: 1px solid #dee2e6 !important;
border-radius: 8px !important;
margin-bottom: 15px !important;
overflow: hidden;
background-color: #ffffff; /* White background for accordion content area */
}
.gradio-accordion > button,
.gradio-accordion > .gr-panel > button {
background-color: #f1f3f5 !important;
border: none !important;
border-bottom: 1px solid #dee2e6 !important;
padding: 12px 18px !important;
font-size: 1.1em !important;
font-weight: 600 !important;
color: #343a40 !important;
width: 100%;
text-align: left;
cursor: pointer;
}
.gradio-accordion > button[open] { /* Style when accordion is open */
border-bottom: 1px solid #dee2e6 !important;
}
.gradio-accordion > button:hover,
.gradio-accordion > .gr-panel > button:hover {
background-color: #e9ecef !important;
}
.gradio-accordion > div { /* Accordion content */
padding: 15px 18px !important;
background-color: #ffffff; /* Ensure content background is white */
}
#citation-button textarea {
font-family: 'Courier New', Courier, monospace;
font-size: 0.95em !important;
background-color: #f1f3f5; /* Match other subtle backgrounds */
border-radius: 6px;
padding: 12px;
border: 1px solid #dee2e6; /* Add border */
}
#citation-button label {
font-weight: 600;
color: #343a40;
}
/* --- Buttons (General / Submit) --- */
.gr-button {
font-size: 1.05em !important;
padding: 10px 20px !important;
border-radius: 6px !important;
font-weight: 500 !important;
transition: background-color 0.2s ease, border-color 0.2s ease;
}
/* Style specific buttons if needed */
#submit_button { /* Example if you add an ID to the submit button */
background-color: #0069d9 !important;
color: white !important;
border: none !important;
}
#submit_button:hover {
background-color: #0056b3 !important;
}
/* --- Textbox / Dropdown --- */
.gr-input, .gr-dropdown, .gr-textbox textarea {
font-size: 1em !important;
border-radius: 6px !important;
border: 1px solid #ced4da !important;
padding: 10px 12px !important;
background-color: #ffffff; /* Ensure inputs have white background */
}
.gr-input:focus, .gr-dropdown:focus, .gr-textbox textarea:focus {
border-color: #80bdff !important;
box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25) !important;
}
.gr-input-label > span, /* Label text */
.gr-checkbox-label > span {
font-size: 1em !important;
font-weight: 500 !important;
color: #495057 !important;
margin-bottom: 5px !important;
}
"""
# Combine original CSS (if any) with new enhancements.
# custom_css is "" when the src module import failed (see the try/except above).
final_css = custom_css + enhanced_css
# Use a theme for better default styling; the Soft theme's hues complement
# the blue link/accent colors used throughout the custom CSS.
demo = gr.Blocks(css=final_css, theme=gr.themes.Soft(
    primary_hue=gr.themes.colors.blue,
    secondary_hue=gr.themes.colors.sky,
    neutral_hue=gr.themes.colors.gray,
    font=[gr.themes.GoogleFont("Inter"), "system-ui", "sans-serif"],
    radius_size=gr.themes.sizes.radius_md,
))

with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("πŸ… Leaderboard", elem_id="llm-benchmark-tab-table", id=0):
            with gr.Column():
                gr.Markdown("## Select Category to View Rankings", elem_classes="markdown-text")
                # Category picker; styled as pill buttons by #category-selector-radio CSS.
                category_selector = gr.Radio(
                    choices=CATEGORIES,
                    label=None,
                    value=DEFAULT_CATEGORY,
                    interactive=True,
                    elem_id="category-selector-radio"
                )
                # Leaderboard table, pre-populated with the default category's ranking.
                leaderboard_df_component = gr.Dataframe(
                    value=update_leaderboard(DEFAULT_CATEGORY),
                    headers=["Rank", "Model", "Organizer", "License", "Elo Score"],
                    datatype=["number", "html", "str", "str", "number"],  # "html" renders the Model links
                    interactive=False,
                    row_count=(len(master_df), "fixed"),
                    col_count=(5, "fixed"),
                    wrap=True,
                    elem_id="leaderboard-table",
                    allow_sort=False # <<< Disable sorting arrows -- NOTE(review): not a documented gr.Dataframe kwarg in all Gradio versions; confirm against the pinned version
                )
                # Re-render the table whenever the category selection changes.
                category_selector.change(
                    fn=update_leaderboard,
                    inputs=category_selector,
                    outputs=leaderboard_df_component
                )
        with gr.TabItem("ℹ️ About", elem_id="llm-benchmark-tab-about", id=1):
            with gr.Column(scale=2, min_width=600):
                gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
        # --- Submit Tab (Commented out) ---
        # with gr.TabItem("πŸš€ Submit Model", elem_id="llm-benchmark-tab-submit", id=2):
        #     ... (Submit tab code remains the same) ...
    with gr.Accordion("πŸ“™ Citation", open=False, elem_classes="gradio-accordion"):
        citation_button = gr.Textbox(
            value=CITATION_BUTTON_TEXT,
            label=CITATION_BUTTON_LABEL,
            lines=8,
            elem_id="citation-button",
            show_copy_button=True,
        )
# --- Scheduler & App Launch ---
# Fix: the original had two separate `if __name__ == "__main__":` guards
# (one for the scheduler, one for launch); they are merged into a single
# entry-point guard while preserving the original execution order.
if __name__ == "__main__":
    # Best-effort: schedule periodic space restarts only when a real repo id
    # is configured; any scheduler failure must not prevent the app launch.
    try:
        scheduler = BackgroundScheduler(daemon=True)
        if callable(restart_space) and REPO_ID and REPO_ID != "your/space-id":
            print(f"Scheduling space restart for {REPO_ID} every 30 minutes.")
            # 1800 seconds == 30 minutes, matching the log message above.
            scheduler.add_job(restart_space, "interval", seconds=1800)
            scheduler.start()
        else:
            # Print appropriate warnings explaining why no job was scheduled.
            if not callable(restart_space):
                print("Warning: restart_space function not available; space restart job not scheduled.")
            if not REPO_ID or REPO_ID == "your/space-id":
                print("Warning: REPO_ID not set or is placeholder; space restart job not scheduled.")
    except Exception as e:
        print(f"Failed to initialize or start scheduler: {e}")

    print("Launching Gradio App...")
    # Use share=True if you need a public link for testing on different devices/networks
    demo.launch()