All models were sampled with their highest available reasoning settings and a maximum token budget.
We also provided the models with a diverse few-shot prompt that is highly supportive for FormulaOne problems,
covering many of the subtle details of state design and maintenance, from a broad array of categories.
""",
elem_classes="markdown-text",
)
# Caption introducing the examples section.
# NOTE(review): the original string literal was split across physical lines
# (a Python syntax error, likely from a bad paste that stripped HTML tags);
# rejoined into one literal here — restore any lost markup (e.g. a <p>
# caption wrapper) from the original source if known.
gr.HTML(
    'Examples of FormulaOne problems'
)
_latex = [
{"left": "$$", "right": "$$", "display": True},
{"left": "$", "right": "$", "display": False},
{"left": "\\(", "right": "\\)", "display": False},
{"left": "\\[", "right": "\\]", "display": True},
]
# Warmup explanation text shown above the warmup example.
# NOTE(review): the original value string was split across physical lines
# (a syntax error) and the call was missing a closing parenthesis; both
# repaired here. Restore any lost markup from the original source if known.
md_warmup = gr.Markdown(
    value=(
        'Brief explanation showcasing the design of a compressed '
        'dynamic programming state-space.'
    ),
)
# Static HTML content shown after the intro video.
gr.HTML(WHAT_IS_F1_HTML_AFTER_VIDEO)
# Evaluation section: warmup performance figure.
gr.HTML(WHAT_IS_F1_HTML_EVAL_BEFORE_WARMUPFIG, padding=False)
# Shared display options for the benchmark figures (no label, no
# share/download/fullscreen chrome).
_warmup_figure_opts = dict(
    width=600,
    show_label=False,
    elem_classes=["f1-image"],
    show_share_button=False,
    show_download_button=False,
    show_fullscreen_button=False,
)
gr.Image("assets/warmup_performance.png", **_warmup_figure_opts)
# Caption for the warmup figure.
# NOTE(review): the original string literal was split across physical lines
# (a syntax error); rejoined here — restore any lost HTML wrapper from the
# original source if known.
gr.HTML('Performance of frontier models on the FormulaOne-Warmup dataset.')
# Transition content between the warmup and Tier 1 figures.
gr.HTML(WHAT_IS_F1_HTML_AFTER_WARMUPFIG)
# Tier 1 performance figure; styling mirrors the warmup figure above.
_tier1_figure_opts = dict(
    width=600,
    show_label=False,
    elem_classes=["f1-image"],
    show_share_button=False,
    show_download_button=False,
    show_fullscreen_button=False,
)
gr.Image("assets/tier1_performance.png", **_tier1_figure_opts)
# Caption for the Tier 1 figure.
# NOTE(review): the original string literal was split across physical lines
# (a syntax error); rejoined here — restore any lost HTML wrapper from the
# original source if known.
gr.HTML(
    'Performance of frontier reasoning models on Tier 1 of FormulaOne.'
)
# Closing HTML content following the Tier 1 figure.
gr.HTML(WHAT_IS_F1_HTML_AFTER_TIER1FIG_TAIL)
# "Leaderboard" tab: intro text plus the leaderboard table.
# NOTE(review): the indentation of this `with` body was lost in the paste
# (an IndentationError as-is); restored here. Nesting reconstructed — confirm
# against the original file.
with gr.TabItem("Leaderboard", elem_id="formulaone-leaderboard-tab-table", id=2):
    gr.Markdown(
        """
Welcome to the FormulaOne leaderboard. This table tracks the performance of various systems on the FormulaOne benchmark.
Use the "Select Columns to Display" dropdown to customize your view, and the search bar to find specific models or organizations.
""",
        elem_classes="markdown-text",
    )
    # Ensure fresh data is present before the table component is built.
    refresh_leaderboard_data()
    # Explicit check instead of `assert` (asserts are stripped under `-O`).
    if leaderboard_df is None:
        raise RuntimeError("leaderboard_df is not initialized after refresh_leaderboard_data()")
    leaderboard_component = init_leaderboard(leaderboard_df)
# "Submit Solutions" tab: login gate plus the submission form.
# NOTE(review): the indentation of these nested `with` bodies was lost in the
# paste (an IndentationError as-is); the nesting below is reconstructed from
# context — confirm against the original file.
with gr.TabItem("Submit Solutions", elem_id="formulaone-submit-tab-table", id=3):
    logger.info("Tab submission")
    with gr.Column():
        with gr.Row():
            gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
        with gr.Row():
            gr.Markdown("# ✉️✨ Submit your solutions", elem_classes="markdown-text")
        # Visible until the user authenticates; gate_submission() (wired in
        # blocks.load below) toggles login_box/submit_panel visibility.
        login_box = gr.Group(visible=True, elem_id="f1-login-box")
        with login_box:
            gr.Markdown("Please sign in with Hugging Face to submit")
            gr.LoginButton(elem_id="hf-login-btn")
        # Hidden until the user is signed in.
        submit_panel = gr.Group(visible=False, elem_classes="markdown-text")
        with submit_panel:
            with gr.Row():
                with gr.Column():
                    gr.Markdown(SUBMISSION_TERMS_TEXT, elem_classes="markdown-text")
                    system_name_textbox = gr.Textbox(label=AutoEvalColumn.system.name)
                    org_textbox = gr.Textbox(label=AutoEvalColumn.organization.name)
                    submission_file = gr.File(label="JSONL solutions file", file_types=[".jsonl"])
                    logger.info("Submit button")
                    submit_button = gr.Button("Submit", variant="primary")
                    submission_result = gr.Markdown()
                    # Route the form fields into the submission callback; the
                    # result markdown shows the outcome.
                    submit_button.click(
                        add_solution_cbk,
                        [
                            system_name_textbox,
                            org_textbox,
                            submission_file,
                        ],
                        submission_result,
                    )
# Citation block, collapsed by default.
# NOTE(review): the indentation of these `with` bodies was lost in the paste
# (an IndentationError as-is); restored here.
with gr.Row():
    logger.info("Citation")
    with gr.Accordion(CITATION_BUTTON_LABEL, open=False):
        gr.Code(
            value=CITATION_BUTTON_TEXT.strip(),
            elem_id="citation-block",
        )
# On page load: populate the leaderboard table and toggle the login box /
# submit panel visibility via gate_submission.
blocks.load(lambda: leaderboard_df, inputs=[], outputs=[leaderboard_component])
blocks.load(gate_submission, inputs=None, outputs=[login_box, submit_panel])
logger.info("Scheduler")
# Background jobs: restart_space every 30 minutes and a leaderboard-data
# refresh every 2 minutes. NOTE(review): restart_space presumably restarts
# the hosting Space — confirm against its definition.
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.add_job(refresh_leaderboard_data, "interval", seconds=120)
scheduler.start()
logger.info("Launch")
# Enable the request queue (up to 40 concurrent workers) and start serving.
blocks.queue(default_concurrency_limit=40).launch()
logger.info("Done")