Spaces:
Runtime error
Add helpful info after submission
app.py CHANGED
@@ -23,7 +23,6 @@ LOGS_REPO = "submission-logs"
 ## TODO ##
 # 1. Add check that fields are nested under `tasks` field correctly
 # 2. Add check that names of tasks and datasets are valid
-# 3. Decide whether we should have 1 dataset repo per participant or 1 repo per submission
 
 
 ###########
@@ -105,77 +104,83 @@ with st.form(key="form"):
     submit_button = st.form_submit_button("Make Submission")
 
 if submit_button and submission_errors == 0:
-    st.
-    ...
-    writer.write(job)
-    logs_repo.push_to_hub(commit_message=f"Submission with job ID {json_resp['id']}")
+    with st.spinner("⏳ Preparing submission for evaluation ..."):
+        submission_name = json_data["submission_name"]
+        submission_name_formatted = submission_name.lower().replace(" ", "-").replace("/", "-")
+        submission_time = str(int(datetime.now().timestamp()))
+
+        # Create submission dataset under benchmarks ORG
+        submission_repo_id = f"{user_name}__{submission_name_formatted}__{submission_time}"
+        dataset_repo_url = f"https://huggingface.co/datasets/GEM-submissions/{submission_repo_id}"
+        repo = Repository(
+            local_dir=LOCAL_REPO,
+            clone_from=dataset_repo_url,
+            repo_type="dataset",
+            private=False,
+            use_auth_token=HF_TOKEN,
+        )
+        submission_metadata = {"benchmark": "gem", "type": "prediction", "submission_name": submission_name}
+        repo.repocard_metadata_save(submission_metadata)
+
+        with open(f"{LOCAL_REPO}/submission.json", "w", encoding="utf-8") as f:
+            json.dump(json_data, f)
+
+        # TODO: add informative commit msg
+        commit_url = repo.push_to_hub()
+        if commit_url is not None:
+            commit_sha = commit_url.split("/")[-1]
+        else:
+            commit_sha = repo.git_head_commit_url().split("/")[-1]
+
+        submission_id = submission_name + "__" + commit_sha + "__" + submission_time
+
+        payload = {
+            "username": AUTONLP_USERNAME,
+            "dataset": "GEM/references",
+            "task": 1,
+            "model": "gem",
+            "submission_dataset": f"GEM-submissions/{submission_repo_id}",
+            "submission_id": submission_id,
+            "col_mapping": {},
+            "split": "test",
+            "config": None,
+        }
+        json_resp = http_post(
+            path="/evaluate/create", payload=payload, token=HF_TOKEN, domain=HF_AUTONLP_BACKEND_API
+        ).json()
+
+        logs_repo_url = f"https://huggingface.co/datasets/GEM-submissions/{LOGS_REPO}"
+        logs_repo = Repository(
+            local_dir=LOGS_REPO,
+            clone_from=logs_repo_url,
+            repo_type="dataset",
+            private=True,
+            use_auth_token=HF_TOKEN,
+        )
+        json_resp["submission_name"] = submission_name
+        with jsonlines.open(f"{LOGS_REPO}/logs.jsonl") as r:
+            lines = []
+            for obj in r:
+                lines.append(obj)
+
+        lines.append(json_resp)
+        with jsonlines.open(f"{LOGS_REPO}/logs.jsonl", mode="w") as writer:
+            for job in lines:
+                writer.write(job)
+        logs_repo.push_to_hub(commit_message=f"Submission with job ID {json_resp['id']}")
 
     if json_resp["status"] == 1:
         st.success(
             f"✅ Submission {submission_name} was successfully submitted for evaluation with job ID {json_resp['id']}"
         )
+        st.markdown(
+            f"""
+            Evaluation takes approximately 1-2 hours to complete, so grab a ☕ or 🍵 while you wait:
+
+            * 📊 Click [here](https://huggingface.co/spaces/GEM/results) to view the results from your submission
+            * 💾 Click [here]({dataset_repo_url}) to view your submission file on the Hugging Face Hub
+            """
+        )
     else:
         st.error("🙈 Oh noes, there was an error submitting your submission! Please contact the organisers")
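The logging step in the diff reads the existing logs.jsonl from the logs repo, appends the evaluation response, and rewrites the whole file before pushing it back to the Hub. Below is a minimal, self-contained sketch of that read-append-rewrite pattern with the jsonlines library; the append_log helper, the local file path, and the sample record are hypothetical and not part of app.py.

import jsonlines

def append_log(path, record):
    """Append one record to a JSONL log by reading every existing entry
    and rewriting the file, mirroring the pattern used in app.py."""
    entries = []
    try:
        with jsonlines.open(path) as reader:  # default mode is "r"
            entries = list(reader)
    except FileNotFoundError:
        pass  # no log yet, start from an empty list
    entries.append(record)
    with jsonlines.open(path, mode="w") as writer:
        for entry in entries:
            writer.write(entry)

# Hypothetical usage with a made-up evaluation response:
append_log("logs.jsonl", {"id": 123, "status": 1, "submission_name": "my-submission"})

jsonlines also supports mode="a", which appends a single line without rewriting the file; the read-then-rewrite version above simply mirrors what the diff does and keeps the full log in memory.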