Commit 976f398 by Clémentine
Parent(s): 4ff9eef

fix order of request file vs request file list, to avoid resubmitting issues

Files changed:
- app.py +2 -1
- src/submission/submit.py +13 -7
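In short: the duplicate-submission check previously compared against a `requested_models` list that could be built before the request files were in place, which let already-submitted models slip through. This commit turns the list into module-level `REQUESTED_MODELS` / `USERS_TO_SUBMISSION_DATES` caches filled lazily on the first call to `add_new_eval` via `already_submitted_models(EVAL_REQUESTS_PATH)`, checks duplicates against that cache before the eval request file is written, and passes the per-user submission dates to the rate limiter. Illustrative sketches follow each diff below.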
app.py CHANGED

@@ -11,7 +11,7 @@ from src.display.about import (
     LLM_BENCHMARKS_TEXT,
     TITLE,
 )
-from src.display.css_html_js import custom_css
+from src.display.css_html_js import custom_css
 from src.display.utils import (
     BENCHMARK_COLS,
     COLS,

@@ -26,6 +26,7 @@ from src.display.utils import (
 from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, H4_TOKEN, IS_PUBLIC, QUEUE_REPO, REPO_ID, RESULTS_REPO
 from src.populate import get_evaluation_queue_df, get_leaderboard_df
 from src.submission.submit import add_new_eval
+from src.submission.check_validity import already_submitted_models
 from src.tools.collections import update_collections
 from src.tools.plots import (
     create_metric_plot_obj,
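The body of `already_submitted_models` is not part of this diff, so the sketch below is only a plausible reading of its contract, inferred from the call sites: it scans `EVAL_REQUESTS_PATH` and returns the set of `{model}_{revision}_{precision}` keys plus a per-user list of submission dates. The directory layout and JSON field names here are assumptions, not shown in this commit.

```python
# Hypothetical sketch of already_submitted_models; the real function lives in
# src/submission/check_validity.py and its body is not shown in this commit.
import json
import os
from glob import glob


def already_submitted_models(requests_path: str) -> tuple[set, dict]:
    requested_models = set()        # keys checked by add_new_eval
    users_to_submission_dates = {}  # fed to user_submission_permission
    for file in glob(os.path.join(requests_path, "**/*.json"), recursive=True):
        with open(file) as f:
            info = json.load(f)
        # Same key format as the duplicate check in submit.py
        requested_models.add(f"{info['model']}_{info['revision']}_{info['precision']}")
        # Assumed fields: "model" (org/name) and a "submitted_time" timestamp
        user = info["model"].split("/")[0]
        users_to_submission_dates.setdefault(user, []).append(info["submitted_time"])
    return requested_models, users_to_submission_dates
```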
src/submission/submit.py CHANGED

@@ -13,8 +13,8 @@ from src.submission.check_validity import (
     user_submission_permission,
 )
 
-
-
+REQUESTED_MODELS = None
+USERS_TO_SUBMISSION_DATES = None
 
 def add_new_eval(
     model: str,

@@ -25,6 +25,12 @@ def add_new_eval(
     weight_type: str,
     model_type: str,
 ):
+    global REQUESTED_MODELS
+    global USERS_TO_SUBMISSION_DATES
+    if not REQUESTED_MODELS:
+        REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
+
+
     precision = precision.split(" ")[0]
     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
 

@@ -33,7 +39,7 @@ def add_new_eval(
 
     # Is the user rate limited?
     user_can_submit, error_msg = user_submission_permission(
-        model,
+        model, USERS_TO_SUBMISSION_DATES, RATE_LIMIT_PERIOD, RATE_LIMIT_QUOTA
     )
     if not user_can_submit:
         return styled_error(error_msg)

@@ -99,15 +105,15 @@ def add_new_eval(
     user_name = model.split("/")[0]
     model_path = model.split("/")[1]
 
+    # Check for duplicate submission
+    if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
+        return styled_warning("This model has been already submitted.")
+
     print("Creating eval file")
    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
     os.makedirs(OUT_DIR, exist_ok=True)
     out_path = f"{OUT_DIR}/{model_path}_eval_request_{private}_{precision}_{weight_type}.json"
 
-    # Check for duplicate submission
-    if f"{model}_{revision}_{precision}" in requested_models:
-        return styled_warning("This model has been already submitted.")
-
     with open(out_path, "w") as f:
         f.write(json.dumps(eval_entry))
 
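The fix itself is a lazy module-level cache: instead of relying on a request-file list built before the request files are necessarily on disk, `add_new_eval` now builds the list on first use. A minimal, self-contained sketch of the same pattern, with a stand-in `load_requests` in place of `already_submitted_models` (everything here is illustrative, not the leaderboard's actual code):

```python
# Minimal sketch of the lazy-initialization pattern introduced in submit.py.

REQUESTED_MODELS = None          # populated on first use, not at import time
USERS_TO_SUBMISSION_DATES = None


def load_requests() -> tuple[set, dict]:
    # Stand-in for already_submitted_models(EVAL_REQUESTS_PATH): in the real
    # code this scans the downloaded request files.
    return {"org/model_main_float16"}, {"org": ["2023-01-01T00:00:00Z"]}


def add_new_eval(model: str, revision: str, precision: str) -> str:
    global REQUESTED_MODELS
    global USERS_TO_SUBMISSION_DATES
    if not REQUESTED_MODELS:
        # First submission after startup: build the cache now, once the
        # request files are available on disk.
        REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = load_requests()

    if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
        return "This model has been already submitted."
    return "Submitted."


print(add_new_eval("org/model", "main", "float16"))  # already submitted
print(add_new_eval("org/other", "main", "float16"))  # accepted
```

One caveat of `if not REQUESTED_MODELS:` is that an empty set is falsy, so a deployment with zero request files would re-scan on every call; that matches the diff's behavior and is harmless here.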