# app.py
import math
import gradio as gr
import pandas as pd
import plotly.graph_objects as go
from apscheduler.schedulers.background import BackgroundScheduler
from gradio_leaderboard import Leaderboard, SelectColumns
from huggingface_hub import whoami
# HTML is split so we can inject Gradio media (images/video) where needed.
from src.about import WHAT_IS_F1_HTML_AFTER_VIDEO # text immediately after the video
from src.about import WHAT_IS_F1_HTML_AFTER_WARMUPFIG # text between warmup/tier1 figs
from src.about import WHAT_IS_F1_HTML_BOTTOM_A_AFTER_TABS # text after the heading, before the first figure
from src.about import WHAT_IS_F1_HTML_BOTTOM_A_BEFORE_TABS # up to (and including) the "Infinite Well" heading
from src.about import WHAT_IS_F1_HTML_EVAL_BEFORE_WARMUPFIG # evaluation section up to before Warmup fig
from src.about import ( # tail after Tier1 fig; ⬅️ split to insert the tabs right after the heading
CITATION_BUTTON_LABEL,
CITATION_BUTTON_TEXT,
EVALUATION_QUEUE_TEXT,
SUBMISSION_TERMS_TEXT,
WHAT_IS_F1_HTML_AFTER_TIER1FIG_TAIL,
WHAT_IS_F1_HTML_TOP,
)
from src.datamodel.data import F1Data
from src.display.css_html_js import custom_css
from src.display.formatting import styled_error
from src.display.utils import AutoEvalColumn, fields
from src.envs import API, CODE_PROBLEMS_REPO, REPO_ID, RESULTS_REPO, SUBMISSIONS_REPO
from src.logger import get_logger
from src.populate import get_leaderboard_df
from src.submission.submit import add_new_solutions, fetch_user_info
from src.validation.validate import MAX_INPUT_LENGTH, MIN_INPUT_LENGTH, is_submission_file_valid, is_valid
logger = get_logger(__name__)
# When False, submissions are accepted even if they don't cover every problem
# in the split (passed through to add_new_solutions below).
ENSURE_ALL_PRESENT = False # TODO: Switch to True.
# Active dataset split; also controls the is_warmup_dataset flags used during
# submission validation. TODO temp — presumably replaced by a tier split later.
SPLIT = "warmup" # TODO temp
# Data-access handle over the code-problems, submissions, and results repos
# for the active split.
lbdb = F1Data(
    cp_ds_name=CODE_PROBLEMS_REPO,
    sub_ds_name=SUBMISSIONS_REPO,
    res_ds_name=RESULTS_REPO,
    split=SPLIT,
)
# Module-level cache of the leaderboard DataFrame; populated/updated by
# refresh_leaderboard_data().
leaderboard_df = None
logger.info("Initialized LBDB")
def restart_space():
    """Trigger a restart of this Hugging Face Space (REPO_ID)."""
    logger.info("Restarting space")
    API.restart_space(repo_id=REPO_ID)
def refresh_leaderboard_data():
    """Refresh the module-level leaderboard cache from the latest results.

    Updates the global ``leaderboard_df`` in place.

    Returns:
        The freshly loaded DataFrame on success, or ``None`` when no new data
        was found or an error occurred. (Previously the success path also
        returned ``None`` implicitly, which gave callers no way to use the
        result.)
    """
    global leaderboard_df
    try:
        logger.info("Loading leaderboard data...")
        new_leaderboard_df = get_leaderboard_df(RESULTS_REPO)
        if new_leaderboard_df is None:
            logger.warning("No new leaderboard data found")
            return None
        # Update the cache before logging success so the log can't claim a
        # refresh that never landed.
        leaderboard_df = new_leaderboard_df
        logger.info("Leaderboard data refreshed successfully")
        return leaderboard_df
    except Exception as e:
        # logger.exception records the full traceback, unlike logger.error(f"..."),
        # and lazy %-formatting avoids building the message unless it is emitted.
        logger.exception("Error refreshing leaderboard data: %s", e)
        return None
def init_leaderboard(dataframe: pd.DataFrame):
    """Build the Leaderboard component for *dataframe*.

    Args:
        dataframe: The leaderboard data to display.

    Returns:
        A configured, non-interactive ``Leaderboard`` component.

    Raises:
        ValueError: If *dataframe* is ``None``.
    """
    if dataframe is None:
        raise ValueError("Leaderboard DataFrame is None.")
    # Compute the column metadata once instead of four separate
    # fields(AutoEvalColumn) calls.
    columns = fields(AutoEvalColumn)
    lb = Leaderboard(
        value=dataframe,
        datatype=[c.type for c in columns],
        select_columns=SelectColumns(
            default_selection=[c.name for c in columns if c.displayed_by_default],
            cant_deselect=[c.name for c in columns if c.never_hidden],
            label="Select Columns to Display:",
        ),
        search_columns=[AutoEvalColumn.system.name, AutoEvalColumn.organization.name],
        hide_columns=[c.name for c in columns if c.hidden],
        bool_checkboxgroup_label="Hide models",
        interactive=False,
    )
    lb.col_count = (1, "fixed")
    return lb
def add_solution_cbk(
    system_name: str,
    org: str,
    submission_path: str,
    profile: gr.OAuthProfile | None,
    oauth_token: gr.OAuthToken | None,
):
    """Validate a user submission and forward it to the results pipeline.

    Args:
        system_name: Display name of the submitted system.
        org: Organisation the submission belongs to.
        submission_path: Path to the uploaded JSONL submission file.
        profile: OAuth profile injected by Gradio; ``None`` when signed out.
        oauth_token: OAuth token injected by Gradio; ``None`` when signed out.

    Returns:
        The result of ``add_new_solutions`` on success, otherwise a styled
        error message describing what went wrong.
    """
    logger.info("Fetching user details for submission")
    logger.info("PROFILE %s", profile)
    logger.info("TOKEN %s", oauth_token)
    if profile is None or oauth_token is None:
        return styled_error("Please sign in with Hugging Face before submitting.")
    # Display handle and display name (may change over time)
    logger.info(f"User handle: {profile.username}")
    display_name = profile.name or profile.username
    logger.info(f"Display name: {display_name}")
    # Stable account id
    user_info = fetch_user_info(oauth_token)
    logger.info("Logged in user info: %s", user_info)
    stable_id = user_info.get("id") if user_info else None
    logger.info(f"User stable ID: {stable_id}")
    if not stable_id:
        return styled_error("Could not retrieve your stable user ID. Please try signing in again.")
    user_id = stable_id
    if not profile.username:
        return styled_error("Could not retrieve username. Please try signing in again.")
    try:
        # Validating the submission file.
        if not submission_path:
            return styled_error("Please upload JSONL submission file.")
        if not is_submission_file_valid(
            submission_path,
            is_warmup_dataset=(SPLIT == "warmup"),
        ):
            return styled_error("Failed to read JSONL submission file. Please try again later.")
        # Validating all user-supplied arguments.
        sys_type = "default"  # Placeholder
        for val, val_name in [
            (system_name, "System name"),
            (org, "Organisation name"),
            # (sys_type, "System type"),
        ]:
            if not val:  # Idiomatic emptiness check (was: len(val) == 0).
                return styled_error(f"Please fill in the '{val_name}' field.")
            if not is_valid(val):
                return styled_error(
                    f"{val_name} is invalid! Must only contain characters [a-zA-Z0-9], spaces, "
                    + "or the special characters '-' and '.', and be of length between "
                    + f"{MIN_INPUT_LENGTH} and {MAX_INPUT_LENGTH}."
                )
    except Exception:
        logger.warning("Failed to process user submission", exc_info=True)
        return styled_error("An error occurred. Please try again later.")  # Intentionally vague.
    return add_new_solutions(
        lbdb,
        profile.username,
        user_id,
        system_name,
        org,
        sys_type,  # Passing the placeholder
        submission_path,
        is_warmup_dataset=(SPLIT == "warmup"),
        ensure_all_present=ENSURE_ALL_PRESENT,
    )
def gate_submission(oauth_token: gr.OAuthToken | None):
    """
    @brief Toggles the visibility of the login box and submission panel based on the user's login status.
    """
    logger.info("GATE TOKEN %s", oauth_token)
    # (login box update, submission panel update)
    show_login = (gr.update(visible=True), gr.update(visible=False))
    show_panel = (gr.update(visible=False), gr.update(visible=True))
    if oauth_token is None:
        logger.info("GATE: NO TOKEN")
        return show_login
    try:
        whoami(oauth_token.token)
    except Exception:
        logger.info("GATE: TOKEN HAS EXPIRED")
        return show_login
    logger.info("GATE: TOKEN IS VALID")
    return show_panel
def get_theme():
    """Return the Gradio theme for the app.

    Currently the built-in "light" preset; a custom Soft theme was prototyped
    here earlier and may return.
    """
    return "light"
# --- Gradio-based tabs for examples (no JS in HTML) ---
def _select_example_tab(choice: str):
    """Return visibility updates for the three example panels.

    Exactly the panel whose label equals *choice* is made visible; the other
    two are hidden.
    """
    labels = ("Warmup", "Tier 1", "Tier 2")
    return tuple(gr.update(visible=(choice == label)) for label in labels)
# === Static, made-up results for the landing chart (not tied to leaderboard) ===
# NOTE: TIER_TOTALS and MODELS_ORDER were previously defined twice with
# identical values; the redundant redefinitions have been removed.
# Number of problems in each tier's dataset.
TIER_TOTALS = {"Warmup": 100, "Tier 1": 100, "Tier 2": 20}  # dataset sizes
# Fixed display order for the models in the chart legend.
MODELS_ORDER = ["GPT-5", "Gemini 2.5 Pro", "Grok 4", "Claude Opus 4", "o3 Pro"]
# Problems solved per model per tier (absolute counts out of TIER_TOTALS).
STATIC_RESULTS = {
    "Warmup": {
        "GPT-5": 95,
        "Gemini 2.5 Pro": 90,
        "Grok 4": 84,
        "Claude Opus 4": 92,
        "o3 Pro": 88,
    },
    "Tier 1": {
        "GPT-5": 38,
        "Gemini 2.5 Pro": 30,
        "Grok 4": 24,
        "Claude Opus 4": 35,
        "o3 Pro": 28,
    },
    "Tier 2": {
        "GPT-5": 1,
        "Gemini 2.5 Pro": 0,
        "Grok 4": 0,
        "Claude Opus 4": 0,
        "o3 Pro": 0,
    },
}
# Model release dates (ISO yyyy-mm-dd), used as the x-axis of the accuracy scatter.
MODEL_RELEASES = {
    "GPT-5": "2025-08-07",
    "Gemini 2.5 Pro": "2025-03-25",
    "Grok 4": "2025-07-09",
    "Claude Opus 4": "2025-05-22",
    "o3 Pro": "2025-06-10",
}
# Accuracy (%) per model per tier, plotted by build_accuracy_figure.
ACCURACY_PCT = {
    "Warmup": {
        "GPT-5": 38,
        "Gemini 2.5 Pro": 35,
        "Grok 4": 28,
        "Claude Opus 4": 32,
        "o3 Pro": 30,
    },
    "Tier 1": {
        "GPT-5": 3,
        "Gemini 2.5 Pro": 2,
        "Grok 4": 1,
        "Claude Opus 4": 2,
        "o3 Pro": 2,
    },
    "Tier 2": {
        "GPT-5": 0,
        "Gemini 2.5 Pro": 0,
        "Grok 4": 0,
        "Claude Opus 4": 0,
        "o3 Pro": 0,
    },
}
def build_accuracy_figure(tier: str):
"""Interactive scatter: x = release date, y = accuracy (%). Hover shows solved/total."""
total = TIER_TOTALS[tier]
fig = go.Figure()
for model in MODELS_ORDER:
date_str = MODEL_RELEASES[model]
y = ACCURACY_PCT[tier][model]
solved = round(y * total / 100)
fig.add_trace(
go.Scatter(
x=[date_str],
y=[y],
mode="markers",
name=model,
marker=dict(size=12, line=dict(width=1)),
hovertemplate=(
f"{model}
"
"Release: %{x|%b %d, %Y}
"
"Accuracy: %{y:.1f}%
"
f"Solved: {solved}/{total}"
"
Footnote. All models were sampled with their highest available reasoning settings and a generous token budget. We also used a diverse few-shot prompt that is highly supportive for these problems, covering many of the subtle details inherent in the tasks (state design, invariants, and bag transformations).
Union-of-Paths-and-Cycles
Maximal-Union-of-Paths-and-Cycles
Maximal-Union-of-Cycles