ycy committed on
Commit 1ec5ad1 · 2 Parent(s): 040a4a4 2969b1f
Files changed (3)
  1. app.py +2 -2
  2. src/about.py +3 -0
  3. src/submission/submit.py +14 -17
app.py CHANGED
@@ -29,7 +29,7 @@ from src.display.utils import (
 )
 from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
 from src.populate import get_evaluation_queue_df, get_leaderboard_df
-from src.submission.submit import add_new_eval
+from src.submission.submit import add_new_open_model_eval
 
 
 
@@ -151,7 +151,7 @@ with demo:
     submit_button = gr.Button("Submit Eval")
     submission_result = gr.Markdown()
     submit_button.click(
-        add_new_eval,
+        add_new_open_model_eval,
         [
             model_name
         ],
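For orientation, here is a minimal sketch of how the renamed callback is wired into the Gradio UI after this change. The component labels and the output passed to click() are assumptions, since the hunk cuts off before the outputs argument.

import gradio as gr

from src.submission.submit import add_new_open_model_eval

with gr.Blocks() as demo:
    # Hypothetical input; the real app defines more components around it.
    model_name = gr.Textbox(label="Model name")
    submit_button = gr.Button("Submit Eval")
    submission_result = gr.Markdown()
    submit_button.click(
        add_new_open_model_eval,  # was add_new_eval before this commit
        [
            model_name
        ],
        submission_result,  # assumed output target; not shown in the hunk
    )

demo.launch()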
src/about.py CHANGED
@@ -85,7 +85,10 @@ To reproduce our results, here is the commands you can run:
 """
 
 EVALUATION_QUEUE_TEXT = """
+<<<<<<< HEAD
 
+=======
+>>>>>>> 2969b1f6d030552c228a8827efdfaf446e18fecc
 
 """
 
src/submission/submit.py CHANGED
@@ -16,10 +16,8 @@ USERS_TO_SUBMISSION_DATES = None
 
 
 
-def add_new_eval(
+def add_new_open_model_eval(
     model: str,
-    model_show: str,
-    open_source: bool
 ):
     """Submitting a model to the evaluation queue automatically saves its information to the requests dataset."""
     global REQUESTED_MODELS
@@ -30,11 +28,11 @@ def add_new_eval(
     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
 
     # Is the model info correctly filled?
-    if open_source:
-        try:
-            model_info = API.model_info(repo_id=model)
-        except Exception:
-            return styled_error("Could not get your model information. Please fill it up properly.")
+
+    try:
+        model_info = API.model_info(repo_id=model)
+    except Exception:
+        return styled_error("Could not get your model information. Please fill it up properly.")
 
     modelcard_OK, error_msg = check_model_card(model)
     if not modelcard_OK:
@@ -45,8 +43,8 @@ def add_new_eval(
 
     eval_entry = {
         "model_name": model,
-        "model_show": model_show,
-        "open_source": open_source,
+        "model_show": model.split("/")[1],
+        "open_source": True,
         "status": "PENDING",
         "submitted_time": str(current_time),
     }
@@ -56,13 +54,12 @@ def add_new_eval(
         return styled_warning("This model has been already submitted.")
 
     print("Creating eval file")
-    if open_source:
-        user_name = model.split("/")[0]
-        OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
-        os.makedirs(OUT_DIR, exist_ok=True)
-        out_path = f"{model}_eval_request_False.json"
-    else:
-        out_path = f"{model_show}_eval_request_True.json"
+
+    user_name = model.split("/")[0]
+    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
+    os.makedirs(OUT_DIR, exist_ok=True)
+    out_path = f"{model}_eval_request_False.json"
+
 
     with open(out_path, "w") as f:
         f.write(json.dumps(eval_entry))
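Taken together, these hunks mean the function no longer accepts model_show and open_source; both are now derived from the repo id. A minimal, self-contained sketch (not the repo's code) of the request record this produces, with "org/model" standing in as a hypothetical submission:

import json
from datetime import datetime, timezone

model = "org/model"  # hypothetical Hub repo id of the form user_name/model_show

eval_entry = {
    "model_name": model,
    "model_show": model.split("/")[1],  # display name: the part after the slash
    "open_source": True,                # always True on this submission path now
    "status": "PENDING",
    "submitted_time": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
}

user_name = model.split("/")[0]                # folder created under EVAL_REQUESTS_PATH
out_path = f"{model}_eval_request_False.json"  # same filename pattern as in the diff

print(user_name, out_path)
print(json.dumps(eval_entry, indent=2))

Running it prints the per-user folder name and the JSON payload that would be written to out_path.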