import json
import os
import pathlib
import tempfile
from datetime import datetime, timezone
from pathlib import Path
from typing import BinaryIO, Literal

import gradio as gr
import pandas as pd
from datasets import load_dataset
from gradio_leaderboard import ColumnFilter, Leaderboard, SelectColumns
from huggingface_hub import HfApi, hf_hub_download

from evaluation import evaluate_problem

PROBLEM_TYPES = ["geometrical", "simple_to_build", "mhd_stable"]
TOKEN = os.environ.get("HF_TOKEN")
CACHE_PATH=os.getenv("HF_HOME", ".")
API = HfApi(token=TOKEN)
submissions_repo = "cgeorgiaw/constellaration-submissions"
results_repo = "cgeorgiaw/constellaration-results"
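# Raw submissions are stored as JSON records in submissions_repo; evaluated records are written to results_repo.
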
def submit_boundary(
problem_type: Literal["geometrical", "simple_to_build", "mhd_stable"],
    boundary_file: BinaryIO | str,
user_state,
) -> tuple[str, str]:
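    """Store an uploaded boundary JSON in the submissions dataset.

    Returns a status message for the UI together with the repo-relative filename
    of the stored record, which later steps use to locate the submission.
    """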
# error handling
if user_state is None:
raise gr.Error("You must be logged in to submit a file.")
    # gr.File may pass a filepath string or a file-like object, depending on its configuration
    file_path = boundary_file if isinstance(boundary_file, str) else getattr(boundary_file, "name", None)
    if not file_path:
        raise gr.Error("Uploaded file object does not have a valid file path.")
path_obj = pathlib.Path(file_path)
    timestamp = datetime.now(timezone.utc).isoformat()  # timezone-aware; datetime.utcnow() is deprecated
with (
path_obj.open("rb") as f_in,
tempfile.NamedTemporaryFile(delete=False, suffix=".json") as tmp_boundary,
):
file_content = f_in.read()
tmp_boundary.write(file_content)
tmp_boundary_path = pathlib.Path(tmp_boundary.name)
# write to dataset
filename = f"{problem_type}/{timestamp.replace(':', '-')}_{problem_type}.json"
record = {
"submission_filename": filename,
"submission_time": timestamp,
"problem_type": problem_type,
"boundary_json": file_content.decode("utf-8"),
"evaluated": False,
"user": user_state,
}
with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as tmp:
json.dump(record, tmp, indent=2)
tmp.flush()
tmp_name = tmp.name
API.upload_file(
path_or_fileobj=tmp_name,
path_in_repo=filename,
repo_id=submissions_repo,
repo_type="dataset",
commit_message=f"Add submission for {problem_type} at {timestamp}"
)
pathlib.Path(tmp_name).unlink()
    # then do eval (currently disabled; evaluate_boundary() handles this separately)
    # local_path = read_boundary(filename)
    # try:
    #     result = evaluate_problem(problem_type, local_path)
    #     write_results(record, result)
    # except Exception as e:
    #     raise gr.Error(f"Error during file write:\n{e}")
    # finally:
tmp_boundary_path.unlink()
return "✅ Your submission has been received! Sit tight and your scores will appear on the leaderboard shortly.", filename
def read_boundary(filename):
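    """Download a submission record from the submissions dataset and return its local file path."""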
local_path = hf_hub_download(
repo_id=submissions_repo,
repo_type="dataset",
filename=filename,
)
return local_path
def write_results(record, result):
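    """Merge evaluation output into the submission record and upload it to the results dataset."""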
record.update(result)
    record['result_filename'] = record['submission_filename'].removesuffix('.json') + '_results.json'  # strip() would remove characters, not the suffix
    record['evaluated'] = True
    if 'objectives' in record:
        record['objective'] = record.pop('objectives')
with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as tmp:
json.dump(record, tmp, indent=2)
tmp.flush()
tmp_name = tmp.name
API.upload_file(
path_or_fileobj=tmp_name,
path_in_repo=record['result_filename'],
repo_id=results_repo,
repo_type="dataset",
commit_message=f"Add result data for {record['result_filename']}"
)
pathlib.Path(tmp_name).unlink()
return
def evaluate_boundary(filename):
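    """Download a stored submission, run the problem-specific evaluation, and upload the results.

    Meant to run after submit_boundary; see the commented-out .then() chain in gradio_interface.
    """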
local_path = read_boundary(filename)
with Path(local_path).open("r") as f:
raw = f.read()
data_dict = json.loads(raw)
result = evaluate_problem(data_dict['problem_type'], local_path)
write_results(data_dict, result)
return
def get_user(profile: gr.OAuthProfile | None) -> str:
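    """Return the logged-in Hugging Face username, or a login prompt when no OAuth profile is present."""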
if profile is None:
return "Please login to submit a boundary for evaluation."
return profile.username
def get_leaderboard(problem_type: str):
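    """Build the leaderboard table for a problem type from the results dataset, sorted ascending by score."""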
ds = load_dataset(results_repo, split='train')
    df = pd.DataFrame(ds)
    # restrict to the selected problem type when that column is present
    if "problem_type" in df.columns:
        df = df[df["problem_type"] == problem_type]
    score_field = "score" if "score" in df.columns else "objective"  # fall back to the raw objective
    df = df.sort_values(by=score_field, ascending=True)
return df
def show_output_box(message):
    # gr.update() works across Gradio versions; the per-component Textbox.update() helper was removed in Gradio 4
    return gr.update(value=message, visible=True)
def gradio_interface() -> gr.Blocks:
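    """Build the Gradio Blocks app: a leaderboard tab plus a submission tab."""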
with gr.Blocks() as demo:
with gr.Tabs(elem_classes="tab-buttons"):
with gr.TabItem("Leaderboard", elem_id="boundary-benchmark-tab-table"):
gr.Markdown("# Boundary Design Leaderboard")
                    leaderboard_type = gr.Dropdown(PROBLEM_TYPES, value="simple_to_build", label="Problem Type")
                    # build the initial table for the dropdown's default value (pass the value, not the component)
                    leaderboard_df = get_leaderboard("simple_to_build")
Leaderboard(
value=leaderboard_df,
select_columns=["submission_time", "feasibility", "score", "objective", "user"],
search_columns=["submission_time", "score"],
hide_columns=["result_filename", "submission_filename", "minimize_objective", "boundary_json", "evaluated"],
# filter_columns=["T", "Precision", "Model Size"],
)
# def update_leaderboard(problem_type):
# return get_leaderboard(problem_type)
# leaderboard_type.change(fn=update_leaderboard, inputs=leaderboard_type, outputs=leaderboard_df)
with gr.TabItem("Submit", elem_id="boundary-benchmark-tab-table"):
gr.Markdown(
"""
# Plasma Boundary Evaluation Submission
Upload your plasma boundary JSON and select the problem type to get your score.
"""
)
user_state = gr.State(value=None)
filename = gr.State(value=None)
eval_state = gr.State(value=None)
gr.LoginButton()
demo.load(get_user, inputs=None, outputs=user_state)
with gr.Row():
problem_type = gr.Dropdown(PROBLEM_TYPES, label="Problem Type")
boundary_file = gr.File(label="Boundary JSON File (.json)")
message = gr.Textbox(label="Evaluation Result", lines=10, visible=False)
submit_btn = gr.Button("Evaluate")
submit_btn.click(
submit_boundary,
inputs=[problem_type, boundary_file, user_state],
outputs=[message, filename],
).then(
fn=show_output_box,
inputs=[message],
outputs=[message],
)
            # .then(
            #     fn=evaluate_boundary,
            #     inputs=[filename],
            #     outputs=[eval_state]
            # )
            # .then(
            #     fn=update_leaderboard,
            #     inputs=[problem_type],
            #     outputs=[leaderboard_df]
            # )
return demo
if __name__ == "__main__":
gradio_interface().launch()