|
import pathlib |
|
from pathlib import Path |
|
import tempfile |
|
from typing import BinaryIO, Literal |
|
import json |
|
import pandas as pd |
|
|
|
import gradio as gr |
|
from datasets import load_dataset, Dataset |
|
from huggingface_hub import upload_file, hf_hub_download |
|
from gradio_leaderboard import ColumnFilter, Leaderboard, SelectColumns |
|
from evaluation import evaluate_problem |
|
from datetime import datetime |
|
import os |
|
|
|
from submit import submit_boundary |
|
from about import PROBLEM_TYPES, TOKEN, CACHE_PATH, API, submissions_repo, results_repo |
|
|
|
def read_boundary(filename):
    """Download a submitted boundary file from the submissions dataset repo.

    Args:
        filename: Path of the file inside the submissions repository.

    Returns:
        Local filesystem path of the cached download.
    """
    return hf_hub_download(
        repo_id=submissions_repo,
        repo_type="dataset",
        filename=filename,
    )
|
|
|
def write_results(record, result):
    """Merge an evaluation result into a submission record and upload it.

    The merged record is serialized to a temporary JSON file, pushed to the
    results dataset repository, and the temporary file is removed afterwards.

    Args:
        record: Submission metadata dict; must contain 'submission_filename'.
        result: Evaluation output dict; its keys are merged into ``record``.
    """
    record.update(result)
    # BUG FIX: str.strip('.json') strips any leading/trailing characters in
    # the set {'.', 'j', 's', 'o', 'n'} (e.g. 'son_data.json' -> '_data'),
    # not the extension. removesuffix drops the '.json' suffix exactly once.
    base_name = record['submission_filename'].removesuffix('.json')
    record['result_filename'] = base_name + '_results.json'
    record['evaluated'] = True
    # Normalize legacy key: some records carry 'objectives' (plural) —
    # presumably older submissions; verify against the submission writer.
    if 'objectives' in record:
        record['objective'] = record.pop('objectives')
    record['minimize_objective'] = True

    # delete=False so the file survives the `with` block for the upload call.
    with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as tmp:
        json.dump(record, tmp, indent=2)
        tmp.flush()
        tmp_name = tmp.name

    try:
        API.upload_file(
            path_or_fileobj=tmp_name,
            path_in_repo=record['result_filename'],
            repo_id=results_repo,
            repo_type="dataset",
            commit_message=f"Add result data for {record['result_filename']}"
        )
    finally:
        # Remove the temp file even if the upload raises.
        pathlib.Path(tmp_name).unlink()
|
|
|
def evaluate_boundary(filename):
    """Fetch a submitted boundary, evaluate it, and persist the results."""
    local_path = read_boundary(filename)
    with Path(local_path).open("r") as fh:
        submission = json.load(fh)
    outcome = evaluate_problem(submission['problem_type'], local_path)
    write_results(submission, outcome)
|
|
|
def get_user(profile: gr.OAuthProfile | None) -> str:
    """Return the logged-in username, or a login prompt when signed out."""
    if profile is not None:
        return profile.username
    return "Please login to submit a boundary for evaluation."
|
|
|
def get_leaderboard():
    """Load evaluation results and return them as a DataFrame, best first."""
    frame = pd.DataFrame(load_dataset(results_repo, split='train'))
    # Sort by 'score' when present, falling back to 'objective' otherwise.
    sort_key = "score" if "score" in frame.columns else "objective"
    return frame.sort_values(by=sort_key, ascending=True)
|
|
|
def show_output_box(message):
    """Reveal the evaluation-result textbox and fill it with ``message``."""
    # BUG FIX: gr.Textbox.update() was removed in Gradio 4 (gradio_leaderboard
    # requires Gradio 4+); returning a component instance with the updated
    # properties is the supported replacement.
    return gr.Textbox(value=message, visible=True)
|
|
|
def gradio_interface() -> gr.Blocks:
    """Build the Gradio app: a leaderboard tab and a submission tab.

    Returns:
        The assembled ``gr.Blocks`` application, ready to ``launch()``.
    """
    with gr.Blocks() as demo:
        with gr.Tabs(elem_classes="tab-buttons"):
            with gr.TabItem("Leaderboard", elem_id="boundary-benchmark-tab-table"):
                gr.Markdown("# Boundary Design Leaderboard")
                leaderboard_df = get_leaderboard()

                Leaderboard(
                    value=leaderboard_df,
                    select_columns=["submission_time", "feasibility", "score", "objective", "user"],
                    search_columns=["submission_time", "score", "user"],
                    hide_columns=["result_filename", "submission_filename", "minimize_objective", "boundary_json", "evaluated"],
                    filter_columns=["problem_type", "submission_time"],
                )

            with gr.TabItem("Submit", elem_id="boundary-benchmark-tab-table"):
                gr.Markdown(
                    """
                    # Plasma Boundary Evaluation Submission
                    Upload your plasma boundary JSON and select the problem type to get your score.
                    """
                )
                # Hidden state shared between the login handler and the
                # submission callbacks.
                user_state = gr.State(value=None)
                filename = gr.State(value=None)
                eval_state = gr.State(value=None)

                gr.LoginButton()

                # Resolve the username (or a login prompt) on page load.
                demo.load(get_user, inputs=None, outputs=user_state)

                with gr.Row():
                    problem_type = gr.Dropdown(PROBLEM_TYPES, label="Problem Type")
                    boundary_file = gr.File(label="Boundary JSON File (.json)")

                message = gr.Textbox(label="Evaluation Result", lines=10, visible=False)
                submit_btn = gr.Button("Evaluate")
                submit_btn.click(
                    submit_boundary,
                    inputs=[problem_type, boundary_file, user_state],
                    outputs=[message, filename],
                ).then(
                    fn=show_output_box,
                    inputs=[message],
                    outputs=[message],
                )
                # TODO(review): chain evaluate_boundary (and a leaderboard
                # refresh) onto the click event; the removed dead-code sketch
                # referenced an undefined `update_leaderboard`.

    return demo
|
|
|
|
|
if __name__ == "__main__":
    # Build and serve the app only when run as a script.
    app = gradio_interface()
    app.launch()
|
|