Commit 606fc93 · jasonshaoshun committed · 1 parent: b688fa2

debug
app.py CHANGED
@@ -255,69 +255,69 @@ from src.about import TasksMib_Subgraph
Old version (lines 255-323):

-# def init_leaderboard_mib_subgraph(dataframe, track):
-#     """Initialize the subgraph leaderboard with grouped column selection for gradio-leaderboard 0.0.13"""
-#     if dataframe is None or dataframe.empty:
-#         raise ValueError("Leaderboard DataFrame is empty or None.")
-
-#     # Get all unique tasks and models
-#     tasks = [task.value.benchmark for task in TasksMib_Subgraph]
-#     models = list(set(model for task in TasksMib_Subgraph for model in task.value.models))
-
-#     # Create two selection groups: one for tasks and one for models
-#     # In 0.0.13, we can only have one SelectColumns, so we'll combine them
-#     selection_choices = [
-#         *[f"Task: {task}" for task in tasks],  # Prefix with 'Task:' for clarity
-#         *[f"Model: {model}" for model in models]  # Prefix with 'Model:' for clarity
-#     ]
-
-#     return Leaderboard(
-#         value=dataframe,
-#         datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
-#         select_columns=SelectColumns(
-#             default_selection=selection_choices,  # Show all by default
-#             choices=selection_choices,
-#             cant_deselect=["Method"],  # Method column always visible
-#             label="Select Tasks or Models:",
-#         ),
-#         search_columns=["Method"],
-#         hide_columns=[c.name for c in fields(AutoEvalColumn_mib_subgraph) if c.hidden],
-#         bool_checkboxgroup_label="Hide models",
-#         interactive=False,
-#     )
-
-
 def init_leaderboard_mib_subgraph(dataframe, track):
-    """Initialize the subgraph leaderboard focusing only on task and model filtering.
-
-    This implementation creates a focused view where users can select which task-model
-    combinations they want to see, making the analysis of results more straightforward.
-    """
     if dataframe is None or dataframe.empty:
         raise ValueError("Leaderboard DataFrame is empty or None.")
 
-    # Get all task-model combinations that actually exist in our data
-    task_model_columns = []
-    for task in TasksMib_Subgraph:
-        for model in task.value.models:
-            col_name = f"{task.value.benchmark}_{model}"
-            if col_name in dataframe.columns:
-                task_model_columns.append(col_name)
 
     return Leaderboard(
         value=dataframe,
         datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
         select_columns=SelectColumns(
-            default_selection=task_model_columns,
-            label="Select Task-Model Combinations:",
         ),
-        search_columns=["Method"],  # Keep Method searchable but not in column selection
-        hide_columns=[],  # We don't need to hide any columns
         bool_checkboxgroup_label="Hide models",
         interactive=False,
     )
New version (lines 255-323):

 def init_leaderboard_mib_subgraph(dataframe, track):
+    """Initialize the subgraph leaderboard with grouped column selection for gradio-leaderboard 0.0.13"""
     if dataframe is None or dataframe.empty:
         raise ValueError("Leaderboard DataFrame is empty or None.")
+
+    # Get all unique tasks and models
+    tasks = [task.value.benchmark for task in TasksMib_Subgraph]
+    models = list(set(model for task in TasksMib_Subgraph for model in task.value.models))

+    # Create two selection groups: one for tasks and one for models
+    # In 0.0.13, we can only have one SelectColumns, so we'll combine them
+    selection_choices = [
+        *[f"Task: {task}" for task in tasks],  # Prefix with 'Task:' for clarity
+        *[f"Model: {model}" for model in models]  # Prefix with 'Model:' for clarity
+    ]

     return Leaderboard(
         value=dataframe,
         datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
         select_columns=SelectColumns(
+            default_selection=selection_choices,  # Show all by default
+            choices=selection_choices,
+            cant_deselect=["Method"],  # Method column always visible
+            label="Select Tasks or Models:",
         ),
+        search_columns=["Method"],
+        hide_columns=[c.name for c in fields(AutoEvalColumn_mib_subgraph) if c.hidden],
         bool_checkboxgroup_label="Hide models",
         interactive=False,
     )


+# def init_leaderboard_mib_subgraph(dataframe, track):
+#     """Initialize the subgraph leaderboard focusing only on task and model filtering.
+
+#     This implementation creates a focused view where users can select which task-model
+#     combinations they want to see, making the analysis of results more straightforward.
+#     """
+#     if dataframe is None or dataframe.empty:
+#         raise ValueError("Leaderboard DataFrame is empty or None.")
+
+#     # Get all task-model combinations that actually exist in our data
+#     task_model_columns = []
+#     for task in TasksMib_Subgraph:
+#         for model in task.value.models:
+#             col_name = f"{task.value.benchmark}_{model}"
+#             if col_name in dataframe.columns:
+#                 task_model_columns.append(col_name)
+
+#     return Leaderboard(
+#         value=dataframe,
+#         datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
+#         select_columns=SelectColumns(
+#             default_selection=task_model_columns,
+#             label="Select Task-Model Combinations:",
+#         ),
+#         search_columns=["Method"],  # Keep Method searchable but not in column selection
+#         hide_columns=[],  # We don't need to hide any columns
+#         bool_checkboxgroup_label="Hide models",
+#         interactive=False,
+#     )
+
+
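Both the old and the new implementation iterate over `TasksMib_Subgraph` through `task.value.benchmark` and `task.value.models`, which are defined in `src/about.py` (not shown in this diff). For orientation, here is a minimal sketch of the structure those accesses assume; the dataclass name, task identifiers, and model names below are placeholders, not the real entries.

```python
from dataclasses import dataclass
from enum import Enum


@dataclass
class TaskConfig:  # hypothetical name; the real class lives in src/about.py
    benchmark: str       # task identifier used to build column names
    models: list[str]    # models evaluated on this task


class TasksMib_Subgraph(Enum):
    # Placeholder entries; only the .benchmark / .models attributes matter here
    task0 = TaskConfig("task_a", ["model_x", "model_y"])
    task1 = TaskConfig("task_b", ["model_y", "model_z"])


# Column names the commented-out "focused view" variant looks for in the dataframe
expected_columns = [
    f"{task.value.benchmark}_{model}"
    for task in TasksMib_Subgraph
    for model in task.value.models
]
# -> ['task_a_model_x', 'task_a_model_y', 'task_b_model_y', 'task_b_model_z']
```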
321 |
|
322 |
|
323 |
|
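As a usage note, `init_leaderboard_mib_subgraph` returns a `gradio_leaderboard.Leaderboard` component, so it is meant to be instantiated inside a Gradio layout. The sketch below is a minimal, hypothetical wiring; the DataFrame columns, method names, and the `track` value are placeholders, and in the real app they come from the result-loading code and the `AutoEvalColumn_mib_subgraph` definitions.

```python
import gradio as gr
import pandas as pd

# Placeholder results; real columns are defined by AutoEvalColumn_mib_subgraph
df = pd.DataFrame({
    "Method": ["method_a", "method_b"],
    "task_a_model_x": [0.71, 0.65],
    "task_b_model_y": [0.58, 0.61],
})

with gr.Blocks() as demo:
    with gr.Tab("Subgraph"):
        leaderboard = init_leaderboard_mib_subgraph(df, track="subgraph")

if __name__ == "__main__":
    demo.launch()
```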