Commit 65f73c3 · Parent: efaa1b2

Comment out average score in leaderboard display and update sorting criteria to prioritize task12

Files changed:
- src/display/utils.py (+1 -1)
- src/leaderboard/read_evals.py (+1 -1)
- src/populate.py (+1 -1)
src/display/utils.py CHANGED
@@ -26,7 +26,7 @@ auto_eval_column_dict = []
 auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
 auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
 #Scores
-auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
+# auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
 for task in Tasks:
     auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
 # Model information
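For context, in the stock leaderboard template these append calls feed make_dataclass, so commenting out the "average" entry removes the "Average ⬆️" column everywhere AutoEvalColumn is consumed. Below is a minimal, hedged sketch of that mechanism; the ColumnContent field names and the frozen make_dataclass call mirror the usual template, but treat them as assumptions rather than this repo's exact code.

from dataclasses import dataclass, make_dataclass

# Assumed shape of the template's ColumnContent container.
@dataclass(frozen=True)
class ColumnContent:
    name: str                  # header shown in the UI, e.g. "Average ⬆️"
    type: str                  # display dtype: "str", "markdown", "number", ...
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False

auto_eval_column_dict = []
auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
# The "average" append is commented out, so it never reaches the dataclass:
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

print(hasattr(AutoEvalColumn, "average"))  # False
print(AutoEvalColumn.model.name)           # "Model"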
src/leaderboard/read_evals.py CHANGED
@@ -121,7 +121,7 @@ class EvalResult:
             AutoEvalColumn.architecture.name: self.architecture,
             AutoEvalColumn.model.name: make_clickable_model(self.full_model),
             AutoEvalColumn.revision.name: self.revision,
-            AutoEvalColumn.average.name: average,
+            # AutoEvalColumn.average.name: average,
             AutoEvalColumn.license.name: self.license,
             AutoEvalColumn.likes.name: self.likes,
             AutoEvalColumn.params.name: self.num_params,
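Only the dict entry is commented out here; in the stock template, to_dict computes average a few lines above this hunk, so that computation now produces an unused value (harmless, though it could be dropped in a follow-up). A minimal, self-contained sketch of that calculation, with hypothetical scores and with results standing in for the template's per-task results dict:

# Hypothetical per-task scores; None marks a task without a result yet.
results = {"task12": 0.91, "task34": 0.55, "task56": None}

# The template sums the non-missing scores but divides by the total task
# count, so missing tasks drag the average down rather than being skipped.
scored = [v for v in results.values() if v is not None]
average = sum(scored) / len(results)
print(round(average, 4))  # 0.4867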
src/populate.py CHANGED
@@ -14,7 +14,7 @@ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchm
     all_data_json = [v.to_dict() for v in raw_data]

     df = pd.DataFrame.from_records(all_data_json)
-    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
+    df = df.sort_values(by=[AutoEvalColumn.task12.name], ascending=False)
     df = df[cols].round(decimals=2)

     # filter out if any of the benchmarks have not been produced
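The new sort key is the display name stored on the task12 ColumnContent, so the leaderboard is now ordered by that single task's score, best first, instead of by the overall average. A minimal sketch of the resulting behavior, using the hypothetical header "Task12" in place of whatever task.value.col_name resolves to:

import pandas as pd

# Hypothetical rows; "Task12" stands in for AutoEvalColumn.task12.name.
df = pd.DataFrame.from_records([
    {"Model": "model-a", "Task12": 0.42},
    {"Model": "model-b", "Task12": 0.91},
    {"Model": "model-c", "Task12": 0.67},
])
df = df.sort_values(by=["Task12"], ascending=False)
print(df.to_string(index=False))
#   Model  Task12
# model-b    0.91
# model-c    0.67
# model-a    0.42

Note that the subsequent df[cols] selection only stays valid because the utils.py change above also drops "Average ⬆️" from the column list generated via AutoEvalColumn; if cols still named the average column, that line would raise a KeyError (assuming cols is derived from AutoEvalColumn as in the stock template).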