|
import pathlib |
|
from pathlib import Path |
|
import tempfile |
|
from typing import BinaryIO, Literal |
|
import json |
|
import pandas as pd |
|
|
|
import gradio as gr |
|
from datasets import load_dataset |
|
from gradio_leaderboard import ColumnFilter, Leaderboard, SelectColumns |
|
from evaluation import evaluate_problem |
|
from datetime import datetime |
|
import os |
|
|
|
from submit import submit_boundary |
|
from about import PROBLEM_TYPES, TOKEN, CACHE_PATH, API, submissions_repo, results_repo |
|
from utils import read_submission_from_hub, read_result_from_hub, write_results, get_user, make_user_clickable, make_boundary_clickable |
|
from visualize import make_visual |
|
from evaluation import load_boundary, load_boundaries |
|
|
|
def evaluate_boundary(filename):
    """Fetch a submission from the hub, evaluate it, and persist the result.

    Parameters
    ----------
    filename : str
        Identifier of the submission file inside the submissions dataset repo.

    Raises
    ------
    gr.Error
        If evaluation fails for any reason; in that case nothing is written
        to the results dataset.
    """
    print(filename)  # lightweight trace of which submission is being evaluated
    local_path = read_submission_from_hub(filename)
    # json.load parses straight from the file handle (no intermediate string).
    with Path(local_path).open("r", encoding="utf-8") as f:
        data_dict = json.load(f)

    try:
        result = evaluate_problem(data_dict['problem_type'], local_path)
    except Exception as e:
        # Chain the original exception so server logs keep the full traceback
        # while the UI shows a friendly message.
        raise gr.Error(f'Evaluation failed: {e}. No results written to results dataset.') from e

    write_results(data_dict, result)
|
|
|
def get_leaderboard():
    """Load the results dataset and format it for the Leaderboard component.

    Returns
    -------
    pandas.DataFrame
        Columns: 'submission time', 'problem type', 'user', 'score',
        'full results' — with the user and results cells rendered as HTML links.
    """
    ds = load_dataset(results_repo, split='train')
    full_df = pd.DataFrame(ds)
    full_df['full results'] = full_df['result_filename'].apply(make_boundary_clickable).astype(str)

    full_df.rename(columns={'submission_time': 'submission time', 'problem_type': 'problem type'}, inplace=True)
    # Column subselection already yields a new DataFrame; a shallow .copy()
    # is enough to avoid SettingWithCopy warnings — no full deep copy needed.
    to_show = full_df[['submission time', 'problem type', 'user', 'score', 'full results']].copy()
    to_show['user'] = to_show['user'].apply(make_user_clickable).astype(str)

    return to_show
|
|
|
def show_output_box(message):
    """Make the status textbox visible and fill it with *message*."""
    return gr.update(visible=True, value=message)
|
|
|
def gradio_interface() -> gr.Blocks:
    """Build the full Gradio app: leaderboard, about, visualize and submit tabs.

    Returns
    -------
    gr.Blocks
        The assembled (not yet launched) Gradio application.
    """
    with gr.Blocks() as demo:
        gr.Markdown("## Welcome to the ConStellaration Boundary Leaderboard!")
        with gr.Tabs(elem_classes="tab-buttons"):
            # --- Leaderboard tab: live table of all evaluated submissions ---
            with gr.TabItem("🚀 Leaderboard", elem_id="boundary-benchmark-tab-table"):
                gr.Markdown("# Boundary Design Leaderboard")

                Leaderboard(
                    value=get_leaderboard(),
                    # One datatype per displayed column, in column order.
                    datatype=['date', 'str', 'html', 'number', 'html'],
                    select_columns=["submission time", "problem type", "user", "score", "full results"],
                    search_columns=["submission time", "score", "user"],
                    filter_columns=["problem type"],
                    # Re-fetch the leaderboard data every 60 seconds.
                    every=60,
                    render=True
                )

            # --- About tab: static challenge description ---
            with gr.TabItem("❔About", elem_id="boundary-benchmark-tab-table"):
                gr.Markdown(
                    """
                    ## About This Challenge

                    **Welcome to the ConStellaration Leaderboard**, a community-driven effort to accelerate fusion energy research using machine learning.

                    In collaboration with [Proxima Fusion](https://www.proximafusion.com/), we're inviting the ML and physics communities to optimize plasma configurations for stellarators—a class of fusion reactors that offer steady-state operation and strong stability advantages over tokamaks.

                    This leaderboard tracks submissions to a series of open benchmark tasks focused on:

                    - **Geometrically optimized stellarators**
                    - **Simple-to-build quasi-isodynamic (QI) stellarators**
                    - **Multi-objective, MHD-stable QI stellarators**

                    Participants are encouraged to build surrogate models, optimize plasma boundaries, and explore differentiable design pipelines that could replace or accelerate slow traditional solvers like VMEC++.

                    ### Why It Matters

                    Fusion promises clean, abundant, zero-carbon energy. But designing stellarators is computationally intense and geometrically complex. With open datasets, reference baselines, and your contributions, we can reimagine this process as fast, iterative, and ML-native.

                    ### How to Participate

                    - Clone the [ConStellaration dataset](https://huggingface.co/datasets/proxima-fusion/constellaration)
                    - Build or train your model on the provided QI equilibria
                    - Submit your predicted boundaries and results here to benchmark against others
                    - Join the discussion and help expand the frontier of fusion optimization

                    Let's bring fusion down to Earth—together.

                    """
                )

            # --- Visualize tab: plot the boundary stored in a chosen result file ---
            with gr.TabItem("🔍 Visualize", elem_id="boundary-benchmark-tab-table"):
                # NOTE(review): the results dataset is loaded once at app-build
                # time, so the dropdown choices are frozen until the process
                # restarts — confirm this is intended.
                ds = load_dataset(results_repo, split='train')
                full_df = pd.DataFrame(ds)
                filenames = full_df['result_filename'].to_list()
                with gr.Row():
                    # NOTE(review): filenames[0] raises IndexError if the results
                    # dataset is empty — verify startup behavior on a fresh repo.
                    dropdown = gr.Dropdown(choices=filenames, label="Choose a file", value=filenames[0])
                    rld_btn = gr.Button(value="Reload")

                plot = gr.Plot()

                def get_boundary_vis(selected_file):
                    """Download the selected result file and build its boundary visual."""
                    local_path = read_result_from_hub(selected_file)
                    with Path(local_path).open("r") as f:
                        raw = f.read()
                    data_dict = json.loads(raw)
                    boundary_json = data_dict['boundary_json']

                    if data_dict['problem_type'] == 'mhd_stable':
                        raise gr.Error("Sorry this isn't implemented for mhd_stable submissions yet!")
                    else:
                        boundary = load_boundary(boundary_json)

                    # NOTE(review): `inputs=[dropdown]` is passed to make_visual
                    # itself, not to a Gradio event wiring call — confirm that
                    # make_visual actually accepts this keyword.
                    vis = make_visual(boundary, inputs=[dropdown])
                    return vis

                # Render the visual on page load and whenever Reload is clicked.
                demo.load(get_boundary_vis, dropdown, plot)
                rld_btn.click(get_boundary_vis, dropdown, plot)

            # --- Submit tab: upload a boundary JSON and trigger evaluation ---
            with gr.TabItem("✉️ Submit", elem_id="boundary-benchmark-tab-table"):
                gr.Markdown(
                    """
                    # Plasma Boundary Evaluation Submission
                    Upload your plasma boundary JSON and select the problem type to get your score.
                    """
                )
                # Cross-callback state: the stored submission filename, and a
                # placeholder that receives evaluate_boundary's (None) output.
                filename = gr.State(value=None)
                eval_state = gr.State(value=None)

                gr.LoginButton()

                with gr.Row():
                    problem_type = gr.Dropdown(PROBLEM_TYPES, label="Problem Type")
                    boundary_file = gr.File(label="Boundary JSON File (.json)")

                submit_btn = gr.Button("Evaluate")
                message = gr.Textbox(label="Status", lines=1, visible=False)
                # Event chain: store the submission, reveal the status box,
                # then run the (slower) evaluation step.
                submit_btn.click(
                    submit_boundary,
                    inputs=[problem_type, boundary_file],
                    outputs=[message, filename],
                ).then(
                    fn=show_output_box,
                    inputs=[message],
                    outputs=[message],
                ).then(
                    fn=evaluate_boundary,
                    inputs=[filename],
                    outputs=[eval_state]
                )

    return demo
|
|
|
|
|
if __name__ == "__main__":
    # Build the app, then start the Gradio server.
    app = gradio_interface()
    app.launch()
|
|