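"""Submission handlers for the leaderboard Space.

Provides Gradio callbacks to add a submission to the evaluation queue
(`upload_to_queue`, `add_new_eval`) and to remove one (`remove_submission`).
Queue entries are written locally as JSON and mirrored to a Hugging Face
dataset repo via the Hub API.
"""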
import json
import os
import smtplib
from datetime import datetime, timezone

import gradio as gr

from src.display.formatting import styled_error, styled_message, styled_warning
from src.envs import (
    API,
    EVAL_REQUESTS_SUBGRAPH,
    EVAL_REQUESTS_CAUSALGRAPH,
    TOKEN,
    QUEUE_REPO_SUBGRAPH,
    QUEUE_REPO_CAUSALGRAPH,
)
from src.submission.check_validity import (
    already_submitted_models,
    get_model_size,
    is_model_on_hub,
    is_valid_predictions,
    parse_huggingface_url,
)

REQUESTED_MODELS = None
USERS_TO_SUBMISSION_DATES = None
def upload_to_queue(track, hf_repo_circ, hf_repo_cg, level, method_name, contact_email, _id):
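    """Validate a submission and push its request JSON to the track's eval queue.

    Returns Gradio component updates: a status textbox, two `None` values for
    the other wired outputs, and a hidden confirmation column.
    """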
    errors = []
    hf_repo = hf_repo_circ if "Circuit" in track else hf_repo_cg
    repo_id, folder_path, revision = parse_huggingface_url(hf_repo)

    try:
        user_name, repo_name = repo_id.split("/")
    except Exception:
        errors.append("Error processing HF URL: could not get username and repo name")

    # Pin the submission to a specific commit: resolve "main" (or a missing
    # revision) to the latest commit hash, otherwise keep the revision given.
    if revision is None or revision == "main":
        try:
            commit_hash = API.list_repo_commits(repo_id)[0].commit_id
        except Exception:
            errors.append("Could not get commit hash of provided Huggingface repo")
    else:
        commit_hash = revision

    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    if not errors:
        if "Circuit" in track:
            eval_entry = {
                "hf_repo": hf_repo,
                "user_name": user_name,
                "revision": commit_hash,
                "circuit_level": level.lower(),
                "method_name": method_name,
                "contact_email": contact_email.lower(),
                "submit_time": current_time,
                "status": "PREVALIDATION",
                "_id": _id
            }
            QUEUE_REPO = QUEUE_REPO_SUBGRAPH
            EVAL_REQUESTS = EVAL_REQUESTS_SUBGRAPH
        else:
            eval_entry = {
                "hf_repo": hf_repo,
                "user_name": user_name,
                "revision": commit_hash,
                "method_name": method_name,
                "contact_email": contact_email.lower(),
                "submit_time": current_time,
                "status": "PREVALIDATION",
                "_id": _id
            }
            QUEUE_REPO = QUEUE_REPO_CAUSALGRAPH
            EVAL_REQUESTS = EVAL_REQUESTS_CAUSALGRAPH

        OUT_DIR = f"{EVAL_REQUESTS}/"
        os.makedirs(OUT_DIR, exist_ok=True)
        out_path = f"{OUT_DIR}/{method_name}_{_id}_{current_time}.json"

        with open(out_path, 'w') as f:
            f.write(json.dumps(eval_entry))
        try:
            API.upload_file(
                path_or_fileobj=out_path,
                path_in_repo=out_path.split("/")[-1],
                repo_id=QUEUE_REPO,
                repo_type="dataset",
                commit_message=f"Add {method_name}_{_id}_{current_time}.json to eval queue"
            )
        except Exception as e:
            errors.append(f"Could not upload entry to eval queue: {e}")

    if errors:
        status = gr.Textbox("\n\n".join(f"❌ {e}" for e in errors), visible=True)
    else:
        status = gr.Textbox(f"✅ Submission received! Your submission ID is \"{_id}\". Save this so that you can manage your submission on the queue.", visible=True)

    return [
        status,
        None, None,
        gr.Column(visible=False)
    ]
def add_new_eval(
    model_name: str,
    model_id: str,
    revision: str,
    track: str,
    predictions: dict,
):
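    """Validate a model submission and its predictions, then write a PENDING
    eval request JSON and upload it to the queue repo for the selected track.

    Returns a styled HTML status message for the UI.
    """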
    global REQUESTED_MODELS
    global USERS_TO_SUBMISSION_DATES

    if track is None:
        return styled_error("Please select a track.")

    # Pick the queue repo and local request dir for the selected track,
    # mirroring upload_to_queue and remove_submission.
    if "Circuit" in track:
        QUEUE_REPO = QUEUE_REPO_SUBGRAPH
        EVAL_REQUESTS = EVAL_REQUESTS_SUBGRAPH
    else:
        QUEUE_REPO = QUEUE_REPO_CAUSALGRAPH
        EVAL_REQUESTS = EVAL_REQUESTS_CAUSALGRAPH

    if not REQUESTED_MODELS:
        REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS)

    out_message = ""
    user_name = ""
    model_path = model_name
    if "/" in model_name:
        user_name, model_path = model_name.split("/", 1)

    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    # Does the model actually exist on the Hub?
    if revision == "":
        revision = "main"

    try:
        API.model_info(repo_id=model_id, revision=revision)
    except Exception:
        out_message += styled_warning("Could not get your model information. The leaderboard entry will not have a link to its HF repo.") + "<br>"

    # Are the predictions well-formed?
    try:
        predictions_OK, error_msg = is_valid_predictions(predictions)
        if not predictions_OK:
            return styled_error(error_msg) + "<br>"
    except Exception as e:
        return styled_error(f"Could not validate predictions: {e}") + "<br>"

    # Seems good, creating the eval entry
    print("Adding new eval")
    eval_entry = {
        "model_name": model_name,
        "hf_repo": model_id,
        "revision": revision,
        "track": track,
        "predictions": predictions,
        "status": "PENDING",
        "submitted_time": current_time,
    }
print("Made it after 4") | |
# Check for duplicate submission | |
if f"{model_name}_{revision}_{track}" in REQUESTED_MODELS: | |
return styled_error("A model with this name has been already submitted.") | |
print("Creating eval file") | |
OUT_DIR = f"{EVAL_REQUESTS}/{user_name}" | |
os.makedirs(OUT_DIR, exist_ok=True) | |
out_path = f"{OUT_DIR}/{model_path}_{revision}_eval_request_False_{track}.json" | |
print("Made it after 5") | |
with open(out_path, "w") as f: | |
f.write(json.dumps(eval_entry)) | |
print("Uploading eval file") | |
API.upload_file( | |
path_or_fileobj=out_path, | |
path_in_repo=out_path.split("eval-queue/")[1], | |
repo_id=QUEUE_REPO, | |
repo_type="dataset", | |
commit_message=f"Add {model_name} to eval queue", | |
) | |
print("Made it after 6") | |
# Remove the local file | |
os.remove(out_path) | |
return styled_message( | |
"Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the request to show in the PENDING list." | |
) | |
def remove_submission(track: str, method_name: str, _id: str):
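    """Delete a queued submission, identified by method name and submission ID,
    from both the local request dir and the queue dataset repo.
    """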
    if track is None:
        return gr.Textbox("Please select a track.", visible=True)

    if "Circuit" in track:
        QUEUE_REPO = QUEUE_REPO_SUBGRAPH
        EVAL_REQUESTS = EVAL_REQUESTS_SUBGRAPH
    else:
        QUEUE_REPO = QUEUE_REPO_CAUSALGRAPH
        EVAL_REQUESTS = EVAL_REQUESTS_CAUSALGRAPH

    OUT_DIR = f"{EVAL_REQUESTS}/"
    os.makedirs(OUT_DIR, exist_ok=True)
    files = os.listdir(OUT_DIR)

    out_paths = [f for f in files if f.startswith(f"{method_name}_{_id}")]
    if out_paths:
        filename = out_paths[0]
        filepath = os.path.join(OUT_DIR, filename)
        with open(filepath, 'r') as f:
            data = json.load(f)
        hf_repo = data["hf_repo"]

        try:
            API.delete_file(
                path_in_repo=filename,
                repo_id=QUEUE_REPO,
                repo_type="dataset"
            )
        except Exception as e:
            return gr.Textbox(f"Could not delete entry from eval queue: {e}", visible=True)

        os.remove(filepath)
        status = "Submission removed from queue."
    else:
        status = "Submission not found in queue."

    return gr.Textbox(status, visible=True)
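
# Illustrative wiring sketch (assumption: the real Gradio UI lives elsewhere in
# the Space, e.g. app.py; every component name and label below is hypothetical,
# shown only to make the expected inputs/outputs of upload_to_queue concrete):
#
#     with gr.Blocks() as demo:
#         track = gr.Radio(["Circuit (subgraph)", "Causal graph"], label="Track")
#         hf_repo_circ = gr.Textbox(label="HF repo (circuit track)")
#         hf_repo_cg = gr.Textbox(label="HF repo (causal graph track)")
#         level = gr.Dropdown(["Edge", "Node"], label="Circuit level")
#         method_name = gr.Textbox(label="Method name")
#         contact_email = gr.Textbox(label="Contact email")
#         _id = gr.Textbox(label="Submission ID")
#         status = gr.Textbox(visible=False)
#         with gr.Column(visible=False) as confirm_col:
#             gr.Markdown("Submission received.")
#         gr.Button("Submit").click(
#             upload_to_queue,
#             inputs=[track, hf_repo_circ, hf_repo_cg, level, method_name, contact_email, _id],
#             outputs=[status, hf_repo_circ, hf_repo_cg, confirm_col],
#         )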