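"""Submission handling for the leaderboard: rate-limited pushes to the submissions
repo, OpenID Connect user-claim lookup, and validation of uploaded solution files."""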
import time
from datetime import datetime, timezone, timedelta
import os
import requests
import pandas as pd
from datasets import Dataset, get_dataset_config_names
from datasets.exceptions import DatasetNotFoundError
from pandas.api.types import is_integer_dtype
import gradio as gr
from src.datamodel.data import F1Data
from src.display.formatting import styled_error, styled_message
from src.display.utils import ModelType
from src.envs import SUBMISSIONS_REPO, TOKEN
from src.logger import get_logger
from src.validation.validate import is_submission_file_valid, is_valid
logger = get_logger(__name__)
MIN_WAIT_TIME_PER_USER_HRS = 24
def add_new_solutions(
lbdb: F1Data,
username: str,
user_id: str,
system_name: str,
org: str,
sys_type: str,
submission_path: str,
is_warmup_dataset: bool,
ensure_all_present: bool = False,
):
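    """Validate a submission file and push it to SUBMISSIONS_REPO as a private dataset config.

    Enforces a rate limit of MIN_WAIT_TIME_PER_USER_HRS hours between submissions
    per user and returns a styled HTML message describing the outcome.
    """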
# Users must wait MIN_WAIT_TIME_PER_USER_HRS hours between submissions.
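    # Log the identity associated with TOKEN; useful when debugging permission issues.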
from huggingface_hub import HfApi
api = HfApi()
logger.info(f"Who am I: {api.whoami(token=TOKEN)}")
try:
submitted_ids = get_dataset_config_names(SUBMISSIONS_REPO, token=TOKEN)
except (DatasetNotFoundError, FileNotFoundError):
submitted_ids = []
logger.info(f"Found submitted IDs: {submitted_ids}")
user_last_submission_date = None
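    # Config names follow "<YYYYMMDD>_<HHMMSS>_<username>_<user_id>" (see submission_id
    # below); scan them for this user's most recent submission timestamp.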
for sid in submitted_ids:
# Extract user ID (last part)
past_user_id = sid.rsplit("_", 1)[-1]
# Extract timestamp string (first two parts)
ts_str = "_".join(sid.split("_", 2)[:2])
logger.info(f"Comparing past user: {past_user_id} with current user ID: {user_id}")
ts = datetime.strptime(ts_str, "%Y%m%d_%H%M%S").replace(tzinfo=timezone.utc)
if past_user_id == user_id:
if user_last_submission_date is None:
user_last_submission_date = ts
else:
user_last_submission_date = max(user_last_submission_date, ts)
if user_last_submission_date is not None:
now = datetime.now(timezone.utc)
elapsed = now - user_last_submission_date
if elapsed < timedelta(hours=MIN_WAIT_TIME_PER_USER_HRS):
remaining_hrs = MIN_WAIT_TIME_PER_USER_HRS - elapsed.total_seconds() / 3600
logger.info(f"{username} must wait {remaining_hrs:.2f} more hours.")
return styled_error(
f"You must wait {MIN_WAIT_TIME_PER_USER_HRS} hours between submissions. "
f"Remaining wait time: {remaining_hrs:.2f} hours"
)
logger.info(
f"Adding new submission: {system_name=}, {org=}, {sys_type=} and {submission_path=}",
)
# Double-checking.
for val in [system_name, org, sys_type]:
assert is_valid(val)
assert is_submission_file_valid(submission_path, is_warmup_dataset=is_warmup_dataset)
sys_type = ModelType.from_str(sys_type).name
try:
submission_df = pd.read_json(submission_path, lines=True)
if ensure_all_present:
_validate_all_submissions_present(lbdb=lbdb, pd_ds=submission_df)
except Exception:
logger.warning("Failed to parse submission DF!", exc_info=True)
return styled_error(
"An error occurred. Please try again later."
        )  # Use the same generic message as other error paths to avoid leaking internal details.
submission_id = f"{datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')}_{username}_{user_id}"
# Seems good, creating the eval.
logger.info(f"Adding new submission: {submission_id}")
submission_ts = time.time_ns()
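    # Attach constant submission metadata to every row before upload.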
def add_info(row):
return {
**row,
"system_name": system_name,
"organization": org,
"system_type": sys_type,
"submission_id": submission_id,
"submission_ts": submission_ts,
"evaluation_id": "", # This will be set later when the evaluation is launched in the backend
"evaluation_start_ts": "", # This will be set when the evaluation starts
}
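    # Convert to a HF Dataset, annotate each row, and push as a private config of the submissions repo.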
ds = Dataset.from_pandas(submission_df).map(add_info)
ds.push_to_hub(
SUBMISSIONS_REPO,
submission_id,
private=True,
)
return styled_message(
"Your request has been submitted to the evaluation queue!\n"
+ "Results may take up to 24 hours to be processed and shown in the leaderboard."
)
def fetch_sub_claim(oauth_token: gr.OAuthToken | None) -> dict | None:
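    """Fetch OpenID Connect user claims for the logged-in user.

    Returns a dict with "sub" (stable user ID), "preferred_username", and "name",
    or None if the user is not logged in or the claims cannot be fetched.
    """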
if oauth_token is None:
return None
provider = os.getenv("OPENID_PROVIDER_URL")
if not provider:
return None
try:
        # Discover the provider's userinfo endpoint from its OIDC metadata document.
        oidc_resp = requests.get(f"{provider}/.well-known/openid-configuration", timeout=5)
        oidc_resp.raise_for_status()
        userinfo_ep = oidc_resp.json()["userinfo_endpoint"]
        claims_resp = requests.get(userinfo_ep, headers={"Authorization": f"Bearer {oauth_token.token}"}, timeout=5)
        logger.info(f"userinfo_endpoint response: status={claims_resp.status_code}\nheaders={dict(claims_resp.headers)}")
        claims_resp.raise_for_status()  # Surface HTTP/auth errors instead of parsing an error body as claims.
        claims = claims_resp.json()
# Typical fields: sub (stable id), preferred_username, name, picture
return {
"sub": claims.get("sub"),
"preferred_username": claims.get("preferred_username"),
"name": claims.get("name"),
}
except Exception as e:
logger.warning(f"Failed to fetch user claims: {e}")
return None
def _validate_all_submissions_present(
lbdb: F1Data,
pd_ds: pd.DataFrame,
):
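    """Raise ValueError unless the file contains exactly one solution for every expected problem ID."""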
logger.info(f"Validating DS size {len(pd_ds)} columns {pd_ds.columns} set {set(pd_ds.columns)}")
expected_cols = ["problem_id", "solution"]
if set(pd_ds.columns) != set(expected_cols):
return ValueError(f"Expected attributes: {expected_cols}, Got: {pd_ds.columns.tolist()}")
if not is_integer_dtype(pd_ds["problem_id"]):
return ValueError("problem_id must be str convertible to int")
if any(type(v) is not str for v in pd_ds["solution"]):
return ValueError("solution must be of type str")
submitted_ids = set(pd_ds.problem_id.astype(str))
if submitted_ids != lbdb.code_problem_ids:
missing = lbdb.code_problem_ids - submitted_ids
unknown = submitted_ids - lbdb.code_problem_ids
raise ValueError(f"Mismatched problem IDs: {len(missing)} missing, {len(unknown)} unknown")
if len(pd_ds) > len(lbdb.code_problem_ids):
return ValueError("Duplicate problem IDs exist in uploaded file")