Completing submission rules
src/submission/submit.py (+43 -6)
@@ -1,6 +1,6 @@
 import json
 import os
-from datetime import datetime, timezone
+from datetime import datetime, timedelta, timezone
 import gradio as gr
 from src.display.formatting import styled_error, styled_message, styled_warning
 from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
@@ -44,6 +44,19 @@ def add_new_eval(
     if not is_model_on_hub(model_name=model, token=TOKEN, test_tokenizer=True): #revision=revision
         return styled_error("Model does not exist on HF Hub. Please select a valid model name.")

+
+    ##check for org banning
+    progress(0.2, desc=f"Checking for banned orgs")
+    banned_orgs = [{
+        'org_name':'TEMPLATE',
+        'banning_reason':'Submitting contaminated models'
+    }]
+
+    if user_name in [banned_org['org_name'] for banned_org in banned_orgs]:
+        return styled_error(
+            f"Your org \"{user_name}\" is banned from submitting models on ABL. If you think this is a mistake then please contact [email protected]"
+        )
+
     """
     if model_type is None or model_type == "":
         return styled_error("Please select a model type.")
@@ -69,10 +82,11 @@ def add_new_eval(
     except Exception:
         return styled_error("Could not get your model information. Please fill it up properly.")

+    progress(0.3, desc=f"Checking model size")
     model_size = get_model_size(model_info=model_info)#, precision=precision

     if model_size>15:
-        return styled_error("
+        return styled_error("Due to limited GPU resources, we are currently unable to accept community-submitted models exceeding 15 billion parameters")

     # Were the model card and license filled?
     try:
@@ -83,6 +97,32 @@ def add_new_eval(
     modelcard_OK, error_msg = check_model_card(model)
     if not modelcard_OK:
         return styled_error(error_msg)
+
+
+
+    ##check if org have submitted in the last 30 days
+    progress(0.6, desc=f"Checking last submission date")
+    previous_user_submissions = USERS_TO_SUBMISSION_DATES.get(user_name)
+
+    if previous_user_submissions:
+
+        previous_user_submission_dates = [datetime.strptime(date.replace("T"," ").split(" ")[0], "%Y-%m-%d") for date in previous_user_submissions]
+        previous_user_submission_dates.sort(reverse=True)
+        most_recent_submission = previous_user_submission_dates[0]
+
+        time_since_last_submission = datetime.now() - most_recent_submission
+        if time_since_last_submission < timedelta(days=30):
+            return styled_warning(
+                f"Your org \"{user_name}\" have already submitted a model in the last 30 days. Please wait before submitting another model. For exceptions please contact [email protected]"
+            )
+
+
+
+    progress(0.8, desc=f"Checking same model submissions")
+
+    # Check for duplicate submission
+    if f"{model}" in REQUESTED_MODELS: #_{revision}_{precision}
+        return styled_warning("This model has been already submitted.")

     # Seems good, creating the eval
     print("Preparing a new eval")
@@ -103,10 +143,7 @@ def add_new_eval(
         #"private": False,
     }

-
-    # Check for duplicate submission
-    if f"{model}" in REQUESTED_MODELS: #_{revision}_{precision}
-        return styled_warning("This model has been already submitted.")
+

     print("Creating eval file")
     OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
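The banned-org rule stores a banning_reason for each entry, but only the org name is used in the membership test. A minimal sketch of an equivalent lookup keyed by org name, which would also let the stored reason appear in the error text; the helper name, message wording, and the "some-org" example are illustrative only, not part of the change:

# Sketch only: a dict keyed by org name instead of a list of dicts.
# "TEMPLATE" is the placeholder entry from the diff; everything else is hypothetical.
BANNED_ORGS = {
    "TEMPLATE": "Submitting contaminated models",
}

def banned_org_error(user_name: str):
    """Return an error message if the org is banned, otherwise None."""
    reason = BANNED_ORGS.get(user_name)
    if reason is None:
        return None
    return (
        f'Your org "{user_name}" is banned from submitting models on ABL '
        f"(reason: {reason}). If you think this is a mistake then please "
        f"contact [email protected]"
    )

print(banned_org_error("TEMPLATE"))   # banned: message includes the stored reason
print(banned_org_error("some-org"))   # not banned: None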
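The 30-day cooldown assumes USERS_TO_SUBMISSION_DATES maps an org name to a list of timestamp strings; the replace("T", " ").split(" ")[0] step implies ISO-8601-style values from which only the date part is kept. A standalone sketch of the same comparison, using made-up data and an illustrative helper name:

from datetime import datetime, timedelta

# Illustrative stand-in for USERS_TO_SUBMISSION_DATES (built elsewhere in the repo).
# Assumed shape: org name -> list of ISO-8601 timestamp strings.
users_to_submission_dates = {
    "some-org": ["2024-05-20T09:30:00Z", "2024-06-18T14:00:00Z"],
}

def is_in_cooldown(user_name: str, cooldown_days: int = 30) -> bool:
    """True if the org's most recent submission is less than cooldown_days old."""
    previous = users_to_submission_dates.get(user_name)
    if not previous:
        return False
    # Keep only the "YYYY-MM-DD" part, mirroring the parsing in the hunk.
    dates = [datetime.strptime(d.replace("T", " ").split(" ")[0], "%Y-%m-%d") for d in previous]
    most_recent = max(dates)
    return datetime.now() - most_recent < timedelta(days=cooldown_days)

print(is_in_cooldown("some-org"))    # True only while 2024-06-18 is within the last 30 days
print(is_in_cooldown("other-org"))   # False: no submissions recorded for this org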
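The progress(0.2, ...) through progress(0.8, ...) calls assume add_new_eval is handed a Gradio progress tracker. A rough sketch of how that wiring typically looks; the parameter list below is illustrative, not the leaderboard's actual signature:

import gradio as gr

def add_new_eval(model: str, user_name: str, progress=gr.Progress()):
    # When the function is attached to a Gradio event, a Progress tracker is
    # injected; calling it with a fraction and a desc updates the progress bar
    # shown to the submitter.
    progress(0.2, desc="Checking for banned orgs")
    # ... remaining submission checks run here ...
    return "All checks passed"  # placeholder for the real success path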