|
import json
import os
import pathlib
import tempfile
from datetime import datetime, timezone
from pathlib import Path
from typing import BinaryIO, Literal

import gradio as gr
import pandas as pd
from datasets import load_dataset, Dataset
from gradio_leaderboard import ColumnFilter, Leaderboard, SelectColumns
from huggingface_hub import HfApi, hf_hub_download, upload_file

from evaluation import evaluate_problem
|
|
|
# Benchmark problems a boundary can be submitted against.
PROBLEM_TYPES = ["geometrical", "simple_to_build", "mhd_stable"]

# Hugging Face auth token; presumably provided as a Space secret — TODO confirm.
TOKEN = os.environ.get("HF_TOKEN")

# Local cache root for hub downloads (falls back to the current directory).
CACHE_PATH=os.getenv("HF_HOME", ".")

# Authenticated hub client used for all dataset uploads.
API = HfApi(token=TOKEN)

# Dataset repo receiving raw user submissions.
submissions_repo = "cgeorgiaw/constellaration-submissions"

# Dataset repo receiving evaluated results shown on the leaderboard.
results_repo = "cgeorgiaw/constellaration-results"
|
|
|
def submit_boundary(
    problem_type: Literal["geometrical", "simple_to_build", "mhd_stable"],
    boundary_file: BinaryIO,
    user_state,
) -> tuple[str, str]:
    """Upload a user's boundary JSON to the submissions dataset repo.

    Args:
        problem_type: Which benchmark problem the boundary targets.
        boundary_file: Uploaded file object; gradio exposes the temp path
            via its ``name`` attribute.
        user_state: Username from the login flow, or None when anonymous.

    Returns:
        A ``(status_message, submission_filename)`` tuple; the filename is
        later used to locate the submission for evaluation.

    Raises:
        gr.Error: If the user is not logged in or the upload has no path.
    """
    if user_state is None:
        raise gr.Error("You must be logged in to submit a file.")

    file_path = boundary_file.name
    if not file_path:
        raise gr.Error("Uploaded file object does not have a valid file path.")

    # Timezone-aware UTC timestamp; datetime.utcnow() is deprecated and naive.
    timestamp = datetime.now(timezone.utc).isoformat()
    file_content = pathlib.Path(file_path).read_bytes()

    # One file per submission, namespaced by problem type. ':' is not safe in
    # repo paths, so it is replaced in the filename only.
    filename = f"{problem_type}/{timestamp.replace(':', '-')}_{problem_type}.json"
    record = {
        "submission_filename": filename,
        "submission_time": timestamp,
        "problem_type": problem_type,
        "boundary_json": file_content.decode("utf-8"),
        "evaluated": False,
        "user": user_state,
    }

    # Serialize the record to a local temp file so it can be uploaded.
    with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as tmp:
        json.dump(record, tmp, indent=2)
        tmp.flush()
        tmp_name = tmp.name

    try:
        API.upload_file(
            path_or_fileobj=tmp_name,
            path_in_repo=filename,
            repo_id=submissions_repo,
            repo_type="dataset",
            commit_message=f"Add submission for {problem_type} at {timestamp}",
        )
    finally:
        # Always remove the local temp file, even if the upload fails.
        pathlib.Path(tmp_name).unlink(missing_ok=True)

    return (
        "✅ Your submission has been received! Sit tight and your scores will appear on the leaderboard shortly.",
        filename,
    )
|
|
|
def read_boundary(filename):
    """Download one submission file from the submissions dataset repo.

    Returns the local filesystem path of the cached download.
    """
    return hf_hub_download(
        repo_id=submissions_repo,
        filename=filename,
        repo_type="dataset",
    )
|
|
|
def write_results(record, result):
    """Merge evaluation *result* into *record* and upload it to the results repo.

    Mutates *record* in place: marks it evaluated, derives the results
    filename from the submission filename, and normalizes the legacy
    'objectives' key to 'objective' (the column the leaderboard reads).
    """
    record.update(result)
    # BUG FIX: str.strip('.json') strips any of the characters '.jsons' from
    # BOTH ends of the string, not the extension. removesuffix removes exactly
    # the trailing '.json'.
    record['result_filename'] = record['submission_filename'].removesuffix('.json') + '_results.json'
    record['evaluated'] = True
    if 'objectives' in record:
        record['objective'] = record.pop('objectives')

    # Serialize the enriched record to a local temp file for upload.
    with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as tmp:
        json.dump(record, tmp, indent=2)
        tmp.flush()
        tmp_name = tmp.name

    try:
        API.upload_file(
            path_or_fileobj=tmp_name,
            path_in_repo=record['result_filename'],
            repo_id=results_repo,
            repo_type="dataset",
            commit_message=f"Add result data for {record['result_filename']}"
        )
    finally:
        # Always clean up the local temp file, even if the upload fails.
        pathlib.Path(tmp_name).unlink(missing_ok=True)
    return
|
|
|
def evaluate_boundary(filename):
    """Fetch a submission by name, evaluate it, and persist the results."""
    local_path = read_boundary(filename)
    data_dict = json.loads(Path(local_path).read_text())
    result = evaluate_problem(data_dict["problem_type"], local_path)
    write_results(data_dict, result)
    return
|
|
|
def get_user(profile: gr.OAuthProfile | None) -> str:
    """Return the logged-in username, or a login prompt for anonymous visitors."""
    if profile is not None:
        return profile.username
    return "Please login to submit a boundary for evaluation."
|
|
|
def get_leaderboard(problem_type: str):
    """Load the results dataset and return it as a sorted DataFrame.

    Args:
        problem_type: The problem to show results for. At app-build time
            gradio passes the Dropdown component itself rather than its
            value, so the selection is read from ``.value`` when present.

    Returns:
        A DataFrame sorted ascending by 'score' (or the legacy 'objective'
        column when 'score' is absent), filtered to *problem_type*.
    """
    ds = load_dataset(results_repo, split='train')
    df = pd.DataFrame(ds)

    # BUG FIX: problem_type was previously ignored, so every problem's
    # results were mixed into a single table.
    selected = getattr(problem_type, "value", problem_type)
    if selected and "problem_type" in df.columns:
        df = df[df["problem_type"] == selected]

    # Older result rows used 'objective' before 'score' was introduced.
    score_field = "score" if "score" in df.columns else "objective"
    return df.sort_values(by=score_field, ascending=True)
|
|
|
def show_output_box(message):
    """Reveal the result textbox with *message* as its content."""
    # BUG FIX: gr.Textbox.update() was removed in Gradio 4; gr.update() is
    # the version-portable way to change component properties.
    return gr.update(value=message, visible=True)
|
|
|
def gradio_interface() -> gr.Blocks:
    """Build the app: a results leaderboard tab and a submission tab.

    Returns:
        The assembled gr.Blocks application (not yet launched).
    """
    with gr.Blocks() as demo:
        with gr.Tabs(elem_classes="tab-buttons"):
            with gr.TabItem("Leaderboard", elem_id="boundary-benchmark-tab-table"):
                gr.Markdown("# Boundary Design Leaderboard")
                leaderboard_type = gr.Dropdown(PROBLEM_TYPES, value="simple_to_build", label="Problem Type")
                leaderboard_df = get_leaderboard(leaderboard_type)

                Leaderboard(
                    value=leaderboard_df,
                    select_columns=["submission_time", "feasibility", "score", "objective", "user"],
                    search_columns=["submission_time", "score"],
                    hide_columns=["result_filename", "submission_filename", "minimize_objective", "boundary_json", "evaluated"],
                )

            with gr.TabItem("Submit", elem_id="boundary-benchmark-tab-table"):
                gr.Markdown(
                    """
                    # Plasma Boundary Evaluation Submission

                    Upload your plasma boundary JSON and select the problem type to get your score.
                    """
                )
                # Per-session state: who is logged in, which file was
                # submitted, and (reserved) the evaluation status.
                user_state = gr.State(value=None)
                filename = gr.State(value=None)
                eval_state = gr.State(value=None)

                gr.LoginButton()

                # Resolve the username once the page (and OAuth session) loads.
                demo.load(get_user, inputs=None, outputs=user_state)

                with gr.Row():
                    problem_type = gr.Dropdown(PROBLEM_TYPES, label="Problem Type")
                    boundary_file = gr.File(label="Boundary JSON File (.json)")

                message = gr.Textbox(label="Evaluation Result", lines=10, visible=False)
                submit_btn = gr.Button("Evaluate")

                # Upload the submission, then reveal the status box.
                submit_btn.click(
                    submit_boundary,
                    inputs=[problem_type, boundary_file, user_state],
                    outputs=[message, filename],
                ).then(
                    fn=show_output_box,
                    inputs=[message],
                    outputs=[message],
                )

    return demo
|
|
|
|
|
if __name__ == "__main__":
    # Build and serve the app when run as a script.
    app = gradio_interface()
    app.launch()
|
|