import pathlib
from pathlib import Path
import tempfile
from typing import BinaryIO, Literal
import json
import pandas as pd
import gradio as gr
from datasets import load_dataset
from gradio_leaderboard import ColumnFilter, Leaderboard, SelectColumns
from evaluation import evaluate_problem
from datetime import datetime
import os
from submit import submit_boundary
from about import PROBLEM_TYPES, TOKEN, CACHE_PATH, API, submissions_repo, results_repo
from utils import read_submission_from_hub, read_result_from_hub, write_results, get_user, make_user_clickable, make_boundary_clickable
from visualize import make_visual
from evaluation import load_boundary, load_boundaries
def evaluate_boundary(filename):
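    """Read a submission file from the Hub, evaluate it for its problem type, and write the result to the results dataset."""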
    print(filename)
    local_path = read_submission_from_hub(filename)
    with Path(local_path).open("r") as f:
        raw = f.read()
        data_dict = json.loads(raw)
    try:
        result = evaluate_problem(data_dict['problem_type'], local_path)
    except Exception as e:
        raise gr.Error(f'Evaluation failed: {e}. No results written to results dataset.')
    write_results(data_dict, result)
    return

def get_leaderboard():
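    """Load the results dataset and build the leaderboard table with clickable user and result links."""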
    ds = load_dataset(results_repo, split='train', download_mode="force_redownload")
    full_df = pd.DataFrame(ds)
    full_df['full results'] = full_df['result_filename'].apply(lambda x: make_boundary_clickable(x)).astype(str)
    full_df.rename(columns={'submission_time': 'submission time', 'problem_type': 'problem type'}, inplace=True)

    to_show = full_df.copy(deep=True)
    to_show = to_show[to_show['user'] != 'test']
    to_show = to_show[['submission time', 'problem type', 'user', 'score', 'full results']]
    to_show['user'] = to_show['user'].apply(lambda x: make_user_clickable(x)).astype(str)
    return to_show

def show_output_box(message):
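    """Make the status textbox visible and set it to the given message."""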
    return gr.update(value=message, visible=True)

def gradio_interface() -> gr.Blocks:
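    """Construct the Gradio Blocks app with Leaderboard, About, Visualize, and Submit tabs."""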
    with gr.Blocks() as demo:
        gr.Markdown("## Welcome to the ConStellaration Boundary Leaderboard!")
        with gr.Tabs(elem_classes="tab-buttons"):
            with gr.TabItem("🚀 Leaderboard", elem_id="boundary-benchmark-tab-table"):
                gr.Markdown("# Boundary Design Leaderboard")
                Leaderboard(
                    value=get_leaderboard(),
                    datatype=['date', 'str', 'html', 'number', 'html'],
                    select_columns=["submission time", "problem type", "user", "score", "full results"],
                    search_columns=["submission time", "score", "user"],
                    # hide_columns=["result_filename", "submission_filename", "objective", "minimize_objective", "boundary_json", "evaluated"],
                    filter_columns=["problem type"],
                    every=60,
                    render=True
                )
                gr.Markdown("For the `geometrical` and `simple_to_build` problems, scores are bounded between 0.0 and 1.0, where 1.0 is the best possible score. For the `mhd_stable` multi-objective problem, the score is unbounded, with no defined maximum.")
with gr.TabItem("❔About", elem_id="boundary-benchmark-tab-table"):
gr.Markdown(
"""
## About This Challenge
**Welcome to the ConStellaration Leaderboard**, a community-driven effort to accelerate fusion energy research using machine learning.
In collaboration with [Proxima Fusion](https://www.proximafusion.com/), we're inviting the ML and physics communities to optimize plasma configurations for stellarators—a class of fusion reactors that offer steady-state operation and strong stability advantages over tokamaks.
This leaderboard tracks submissions to a series of open benchmark tasks focused on:
- **Geometrically optimized stellarators**
- **Simple-to-build quasi-isodynamic (QI) stellarators**
- **Multi-objective, MHD-stable QI stellarators**
Participants are encouraged to build surrogate models, optimize plasma boundaries, and explore differentiable design pipelines that could replace or accelerate slow traditional solvers like VMEC++.
### Why It Matters
Fusion promises clean, abundant, zero-carbon energy. But designing stellarators is computationally intense and geometrically complex. With open datasets, reference baselines, and your contributions, we can reimagine this process as fast, iterative, and ML-native.
### How to Participate
- Clone the [ConStellaration dataset](https://huggingface.co/datasets/proxima-fusion/constellaration)
- Build or train your model on the provided QI equilibria
- Submit your predicted boundaries and results here to benchmark against others
- Join the discussion and help expand the frontier of fusion optimization
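
                    For example, a minimal starting point for loading the dataset might look like this (a sketch; the dataset name comes from the link above, and the split name is an assumption):

                    ```python
                    from datasets import load_dataset

                    # Load the ConStellaration QI equilibria (split name assumed; check the dataset card).
                    ds = load_dataset("proxima-fusion/constellaration", split="train")
                    print(ds[0].keys())  # inspect the available fields
                    ```
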
                    Let's bring fusion down to Earth—together.
                    """
                )
            # dropdown = gr.Dropdown(choices=filenames, label="Choose a file")
            # plot_output = gr.Plot()
with gr.TabItem("🔍 Visualize", elem_id="boundary-benchmark-tab-table"):
ds = load_dataset(results_repo, split='train', download_mode="force_redownload")
full_df = pd.DataFrame(ds)
filenames = full_df['result_filename'].to_list()
with gr.Row():
with gr.Column():
dropdown = gr.Dropdown(choices=filenames, label="Choose a leaderboard entry", value=filenames[0])
rld_btn = gr.Button(value="Reload")
with gr.Column():
plot = gr.Plot()
def get_boundary_vis(selected_file):
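                    """Load a result file from the Hub and build a boundary visualization for the plot panel."""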
                    local_path = read_result_from_hub(selected_file)
                    with Path(local_path).open("r") as f:
                        raw = f.read()
                        data_dict = json.loads(raw)
                    boundary_json = data_dict['boundary_json']
                    if data_dict['problem_type'] == 'mhd_stable':
                        raise gr.Error("Sorry, this isn't implemented for mhd_stable submissions yet!")
                    else:
                        boundary = load_boundary(boundary_json)
                        vis = make_visual(boundary)
                    return vis

                demo.load(get_boundary_vis, dropdown, plot)
                rld_btn.click(get_boundary_vis, dropdown, plot)
with gr.TabItem("✉️ Submit", elem_id="boundary-benchmark-tab-table"):
gr.Markdown(
"""
# Plasma Boundary Evaluation Submission
Upload your plasma boundary JSON and select the problem type to get your score.
"""
)
filename = gr.State(value=None)
eval_state = gr.State(value=None)
user_state = gr.State(value=None)
# gr.LoginButton()
with gr.Row():
with gr.Column():
problem_type = gr.Dropdown(PROBLEM_TYPES, label="Problem Type")
username_input = gr.Textbox(
label="Username",
placeholder="Enter your Hugging Face username",
info="This will be displayed on the leaderboard."
)
with gr.Column():
boundary_file = gr.File(label="Boundary JSON File (.json)")
username_input.change(
fn=lambda x: x if x.strip() else None,
inputs=username_input,
outputs=user_state
)
submit_btn = gr.Button("Evaluate")
message = gr.Textbox(label="Status", lines=1, visible=False)
# help message
gr.Markdown("If you have issues with submission or using the leaderboard, please start a discussion in the Community tab of this Space.")
                submit_btn.click(
                    submit_boundary,
                    inputs=[problem_type, boundary_file, user_state],
                    outputs=[message, filename],
                ).then(
                    fn=show_output_box,
                    inputs=[message],
                    outputs=[message],
                ).then(
                    fn=evaluate_boundary,
                    inputs=[filename],
                    outputs=[eval_state]
                )

    return demo

if __name__ == "__main__":
    gradio_interface().launch()