cgeorgiaw (HF Staff) committed · Commit 0003044 · 1 parent: 4ac12fd

still trying to make the leaderboard

Files changed (1): app.py (+12, -6)
app.py CHANGED
@@ -26,13 +26,18 @@ def evaluate_boundary(filename):
     write_results(data_dict, result)
     return
 
+def make_clickable(name):
+    link =f'https://huggingface.co/{name}'
+    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{name}</a>'
+
+
 def get_leaderboard():
     ds = load_dataset(results_repo, split='train')
     df = pd.DataFrame(ds)
 
     df.rename(columns={'submission_time': 'submission time', 'problem_type': 'problem type'}, inplace=True)
-    df['user'] = df['user'].apply(lambda x: f'<a href="https://huggingface.co/{x}">{x}</a>').astype(str)
-    # df = df.to_html(escape=False, render_links=True)
+    df['user'] = df['user'].apply(lambda x: make_clickable(x)).astype(str)
+
 
     score_field = "score" if "score" in df.columns else "objective"  # fallback
 
@@ -48,7 +53,7 @@ def gradio_interface() -> gr.Blocks:
         with gr.TabItem("Leaderboard", elem_id="boundary-benchmark-tab-table"):
             gr.Markdown("# Boundary Design Leaderboard")
 
-            '''Leaderboard(
+            Leaderboard(
                 value=get_leaderboard(),
                 datatype=['str', 'date', 'str', 'str', 'bool', 'html', 'number', 'bool', 'number', 'number', 'str'],
                 select_columns=["submission time", "feasibility", "score", "problem type", "objective", "user"],
@@ -56,15 +61,16 @@ def gradio_interface() -> gr.Blocks:
                 hide_columns=["result_filename", "submission_filename", "minimize_objective", "boundary_json", "evaluated"],
                 filter_columns=["problem type"],
                 every=60,
-            )'''
+                render=True
+            )
 
-            gr.Dataframe(
+            '''gr.Dataframe(
                 value=get_leaderboard(),
                 datatype=['str', 'date', 'str', 'str', 'bool', 'html', 'number', 'bool', 'number', 'number', 'str'],
                 show_search='filter',
                 every=60,
                 render=True
-            )
+            )'''
 
         with gr.TabItem("About", elem_id="boundary-benchmark-tab-table"):
             gr.Markdown(
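
The net effect of the change: usernames in the results DataFrame are rendered as profile links via the new make_clickable helper, and the table is now served through the Leaderboard component while the earlier gr.Dataframe call is commented out. Below is a minimal sketch of what the helper produces, using a hypothetical two-row DataFrame in place of the rows actually loaded from results_repo (only pandas is assumed):

import pandas as pd

def make_clickable(name):
    # Helper added in this commit: wrap a username in a dotted-underline profile link.
    link = f'https://huggingface.co/{name}'
    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{name}</a>'

# Hypothetical stand-in for the dataset loaded from results_repo.
df = pd.DataFrame({'user': ['alice', 'bob'], 'score': [0.91, 0.87]})
df['user'] = df['user'].apply(make_clickable).astype(str)

print(df.loc[0, 'user'])
# -> <a target="_blank" href="https://huggingface.co/alice" ...>alice</a>

The 'html' entry in the datatype list is presumably what lets the user column render these anchors as clickable links rather than escaped markup.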