jasonshaoshun committed on
Commit ad392eb · 1 Parent(s): 606fc93
Files changed (1)
  1. app.py +82 -27
app.py CHANGED
@@ -255,36 +255,36 @@ from src.about import TasksMib_Subgraph
 
 
 
-def init_leaderboard_mib_subgraph(dataframe, track):
-    """Initialize the subgraph leaderboard with grouped column selection for gradio-leaderboard 0.0.13"""
-    if dataframe is None or dataframe.empty:
-        raise ValueError("Leaderboard DataFrame is empty or None.")
+# def init_leaderboard_mib_subgraph(dataframe, track):
+#     """Initialize the subgraph leaderboard with grouped column selection for gradio-leaderboard 0.0.13"""
+#     if dataframe is None or dataframe.empty:
+#         raise ValueError("Leaderboard DataFrame is empty or None.")
 
-    # Get all unique tasks and models
-    tasks = [task.value.benchmark for task in TasksMib_Subgraph]
-    models = list(set(model for task in TasksMib_Subgraph for model in task.value.models))
+#     # Get all unique tasks and models
+#     tasks = [task.value.benchmark for task in TasksMib_Subgraph]
+#     models = list(set(model for task in TasksMib_Subgraph for model in task.value.models))
 
-    # Create two selection groups: one for tasks and one for models
-    # In 0.0.13, we can only have one SelectColumns, so we'll combine them
-    selection_choices = [
-        *[f"Task: {task}" for task in tasks],     # Prefix with 'Task:' for clarity
-        *[f"Model: {model}" for model in models]  # Prefix with 'Model:' for clarity
-    ]
+#     # Create two selection groups: one for tasks and one for models
+#     # In 0.0.13, we can only have one SelectColumns, so we'll combine them
+#     selection_choices = [
+#         *[f"Task: {task}" for task in tasks],     # Prefix with 'Task:' for clarity
+#         *[f"Model: {model}" for model in models]  # Prefix with 'Model:' for clarity
+#     ]
 
-    return Leaderboard(
-        value=dataframe,
-        datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
-        select_columns=SelectColumns(
-            default_selection=selection_choices,  # Show all by default
-            choices=selection_choices,
-            cant_deselect=["Method"],  # Method column always visible
-            label="Select Tasks or Models:",
-        ),
-        search_columns=["Method"],
-        hide_columns=[c.name for c in fields(AutoEvalColumn_mib_subgraph) if c.hidden],
-        bool_checkboxgroup_label="Hide models",
-        interactive=False,
-    )
+#     return Leaderboard(
+#         value=dataframe,
+#         datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
+#         select_columns=SelectColumns(
+#             default_selection=selection_choices,  # Show all by default
+#             choices=selection_choices,
+#             cant_deselect=["Method"],  # Method column always visible
+#             label="Select Tasks or Models:",
+#         ),
+#         search_columns=["Method"],
+#         hide_columns=[c.name for c in fields(AutoEvalColumn_mib_subgraph) if c.hidden],
+#         bool_checkboxgroup_label="Hide models",
+#         interactive=False,
+#     )
 
 
 # def init_leaderboard_mib_subgraph(dataframe, track):
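For reference, the function removed above built a single flat choice list mixing task and model labels, since gradio-leaderboard 0.0.13 accepts only one SelectColumns group. A minimal sketch of that construction, using a hypothetical stand-in for the TasksMib_Subgraph enum (the TaskMeta/TasksDemo names and the benchmark/model strings are illustrative, not from this repo):

from dataclasses import dataclass
from enum import Enum

@dataclass
class TaskMeta:  # hypothetical; mirrors the task.value fields used above
    benchmark: str
    models: list

class TasksDemo(Enum):  # hypothetical stand-in for TasksMib_Subgraph
    TASK0 = TaskMeta("ioi", ["gpt2", "qwen2_5"])
    TASK1 = TaskMeta("mcqa", ["qwen2_5", "llama3"])

tasks = [t.value.benchmark for t in TasksDemo]
models = list(set(m for t in TasksDemo for m in t.value.models))
selection_choices = [*[f"Task: {t}" for t in tasks], *[f"Model: {m}" for m in models]]
print(selection_choices)
# ['Task: ioi', 'Task: mcqa', 'Model: ...']; model order varies with set iteration

Note that these "Task: X" / "Model: Y" strings are labels, not actual dataframe column names; the replacement function below only offers a label after verifying that matching columns exist in the dataframe.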
@@ -318,6 +318,61 @@ def init_leaderboard_mib_subgraph(dataframe, track):
 #     )
 
 
+def init_leaderboard_mib_subgraph(dataframe, track):
+    """Initialize the subgraph leaderboard with verified task/model column selection"""
+    if dataframe is None or dataframe.empty:
+        raise ValueError("Leaderboard DataFrame is empty or None.")
+
+    # First, let's identify which columns actually exist in our dataframe
+    print("Available columns in dataframe:", dataframe.columns.tolist())
+
+    # Create task selections based on the TasksMib_Subgraph definition
+    task_selections = []
+    for task in TasksMib_Subgraph:
+        task_cols = []
+        for model in task.value.models:
+            col_name = f"{task.value.benchmark}_{model}"
+            if col_name in dataframe.columns:
+                task_cols.append(col_name)
+
+        if task_cols:  # Only add tasks that have data
+            print(f"Task {task.value.benchmark} has columns:", task_cols)
+            task_selections.append(f"Task: {task.value.benchmark}")
+
+    # Create model selections by checking which models appear in columns
+    model_selections = []
+    all_models = list(set(model for task in TasksMib_Subgraph for model in task.value.models))
+
+    for model in all_models:
+        model_cols = []
+        for task in TasksMib_Subgraph:
+            if model in task.value.models:
+                col_name = f"{task.value.benchmark}_{model}"
+                if col_name in dataframe.columns:
+                    model_cols.append(col_name)
+
+        if model_cols:  # Only add models that have data
+            print(f"Model {model} has columns:", model_cols)
+            model_selections.append(f"Model: {model}")
+
+    # Combine all selections
+    selections = task_selections + model_selections
+    print("Final selection options:", selections)
+
+    return Leaderboard(
+        value=dataframe,
+        datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
+        select_columns=SelectColumns(
+            default_selection=selections,
+            label="Select Tasks or Models:"
+        ),
+        search_columns=["Method"],
+        hide_columns=[c.name for c in fields(AutoEvalColumn_mib_subgraph) if c.hidden],
+        bool_checkboxgroup_label="Hide models",
+        interactive=False,
+    )
+
+
 
 
 
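To see what the verification step in the new function produces, here is a minimal sketch run against a toy wide-format dataframe. The column names follow the f"{benchmark}_{model}" scheme the new function checks; the concrete benchmark, model, and method names are illustrative only, not taken from the actual leaderboard data:

import pandas as pd

# Toy scores table: one row per method, one column per (benchmark, model) pair.
df = pd.DataFrame({
    "Method": ["EAP", "ACDC"],    # hypothetical method names
    "ioi_gpt2": [0.71, 0.64],     # hypothetical benchmark_model columns
    "mcqa_qwen2_5": [0.58, 0.61],
})

# Same check the new function performs: a benchmark only becomes a
# "Task: ..." choice if at least one of its expected columns exists.
benchmarks = {"ioi": ["gpt2"], "mcqa": ["qwen2_5"], "arithmetic": ["llama3"]}
selections = []
for bench, bench_models in benchmarks.items():
    cols = [f"{bench}_{m}" for m in bench_models if f"{bench}_{m}" in df.columns]
    if cols:
        selections.append(f"Task: {bench}")
print(selections)
# ['Task: ioi', 'Task: mcqa']  ('arithmetic' has no matching columns, so it is dropped)

The model side works symmetrically, and the surviving "Task: ..." / "Model: ..." labels are what default_selection receives.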