jasonshaoshun committed on
Commit 0c85aa2
1 Parent(s): 5b3b90b
Files changed (1)
  1. app.py +113 -50
app.py CHANGED
@@ -318,63 +318,131 @@ from src.about import TasksMib_Subgraph
 # )


-def init_leaderboard_mib_subgraph(dataframe, track):
-    """Initialize the subgraph leaderboard with verified task/model column selection"""
-    if dataframe is None or dataframe.empty:
-        raise ValueError("Leaderboard DataFrame is empty or None.")
+
+
+
+
+
+
+# def init_leaderboard_mib_subgraph(dataframe, track):
+#     """Initialize the subgraph leaderboard with verified task/model column selection"""
+#     if dataframe is None or dataframe.empty:
+#         raise ValueError("Leaderboard DataFrame is empty or None.")
+
+#     # First, let's identify which columns actually exist in our dataframe
+#     print("Available columns in dataframe:", dataframe.columns.tolist())

-    # First, let's identify which columns actually exist in our dataframe
-    print("Available columns in dataframe:", dataframe.columns.tolist())
-
-    # Create task selections based on TasksMib_Subgraph definition
-    task_selections = []
-    for task in TasksMib_Subgraph:
-        task_cols = []
-        for model in task.value.models:
-            col_name = f"{task.value.benchmark}_{model}"
-            if col_name in dataframe.columns:
-                task_cols.append(col_name)
+#     # Create task selections based on TasksMib_Subgraph definition
+#     task_selections = []
+#     for task in TasksMib_Subgraph:
+#         task_cols = []
+#         for model in task.value.models:
+#             col_name = f"{task.value.benchmark}_{model}"
+#             if col_name in dataframe.columns:
+#                 task_cols.append(col_name)

-        if task_cols:  # Only add tasks that have data
-            print(f"Task {task.value.benchmark} has columns:", task_cols)
-            task_selections.append(f"Task: {task.value.benchmark}")
-
-    # Create model selections by checking which models appear in columns
-    model_selections = []
-    all_models = list(set(model for task in TasksMib_Subgraph for model in task.value.models))
-
-    for model in all_models:
-        model_cols = []
-        for task in TasksMib_Subgraph:
-            if model in task.value.models:
-                col_name = f"{task.value.benchmark}_{model}"
-                if col_name in dataframe.columns:
-                    model_cols.append(col_name)
+#         if task_cols:  # Only add tasks that have data
+#             print(f"Task {task.value.benchmark} has columns:", task_cols)
+#             task_selections.append(f"Task: {task.value.benchmark}")
+
+#     # Create model selections by checking which models appear in columns
+#     model_selections = []
+#     all_models = list(set(model for task in TasksMib_Subgraph for model in task.value.models))
+
+#     for model in all_models:
+#         model_cols = []
+#         for task in TasksMib_Subgraph:
+#             if model in task.value.models:
+#                 col_name = f"{task.value.benchmark}_{model}"
+#                 if col_name in dataframe.columns:
+#                     model_cols.append(col_name)

-        if model_cols:  # Only add models that have data
-            print(f"Model {model} has columns:", model_cols)
-            model_selections.append(f"Model: {model}")
+#         if model_cols:  # Only add models that have data
+#             print(f"Model {model} has columns:", model_cols)
+#             model_selections.append(f"Model: {model}")
+
+#     # Combine all selections
+#     selections = task_selections + model_selections
+#     print("Final selection options:", selections)
+
+#     # Print DataFrame information
+#     print("\nDebugging DataFrame:")
+#     print("DataFrame columns:", dataframe.columns.tolist())
+#     print("DataFrame shape:", dataframe.shape)
+#     print("DataFrame head:\n", dataframe.head())

-    # Combine all selections
-    selections = task_selections + model_selections
-    print("Final selection options:", selections)
+#     return Leaderboard(
+#         value=dataframe,
+#         datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
+#         select_columns=SelectColumns(
+#             default_selection=selections,
+#             label="Select Tasks or Models:"
+#         ),
+#         search_columns=["Method"],
+#         hide_columns=[c.name for c in fields(AutoEvalColumn_mib_subgraph) if c.hidden],
+#         bool_checkboxgroup_label="Hide models",
+#         interactive=False,
+#     )

-    # Print DataFrame information
-    print("\nDebugging DataFrame:")
-    print("DataFrame columns:", dataframe.columns.tolist())
-    print("DataFrame shape:", dataframe.shape)
-    print("DataFrame head:\n", dataframe.head())
+
+
+def init_leaderboard_mib_subgraph(dataframe, track):
+    """Initialize the subgraph leaderboard with benchmark and model filtering capabilities."""
+    if dataframe is None or dataframe.empty:
+        raise ValueError("Leaderboard DataFrame is empty or None.")
+
+    # Print DataFrame information for debugging
+    print("\nDebugging DataFrame columns:", dataframe.columns.tolist())
+
+    # Get result columns (excluding Method and Average)
+    result_columns = [col for col in dataframe.columns
+                      if col not in ['Method', 'Average'] and '_' in col]
+
+    # Create benchmark and model selections
+    benchmarks = set()
+    models = set()
+
+    # Extract unique benchmarks and models from column names
+    for col in result_columns:
+        benchmark, model = col.split('_')
+        benchmarks.add(benchmark)
+        models.add(model)
+
+    # Create selection groups
+    benchmark_selections = {
+        # For each benchmark, store which columns should be shown
+        benchmark: [col for col in result_columns if col.startswith(f"{benchmark}_")]
+        for benchmark in benchmarks
+    }
+
+    model_selections = {
+        # For each model, store which columns should be shown
+        model: [col for col in result_columns if col.endswith(f"_{model}")]
+        for model in models
+    }
+
+    # Combine the selection mappings
+    selection_groups = {
+        **benchmark_selections,
+        **model_selections
+    }
+
+    print("\nDebugging Selection Groups:")
+    print("Benchmarks:", benchmark_selections.keys())
+    print("Models:", model_selections.keys())
+
+    # Convert keys to list for selection options
+    selection_options = list(selection_groups.keys())

     return Leaderboard(
         value=dataframe,
         datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
         select_columns=SelectColumns(
-            default_selection=selections,
-            label="Select Tasks or Models:"
+            default_selection=selection_options,  # Show all options by default
+            label="Filter by Benchmark or Model:"
         ),
         search_columns=["Method"],
-        hide_columns=[c.name for c in fields(AutoEvalColumn_mib_subgraph) if c.hidden],
-        bool_checkboxgroup_label="Hide models",
+        hide_columns=[],
         interactive=False,
     )

@@ -382,11 +450,6 @@ def init_leaderboard_mib_subgraph(dataframe, track):



-
-
-
-
-
 def init_leaderboard_mib_causalgraph(dataframe, track):
     # print("Debugging column issues:")
     # print("\nActual DataFrame columns:")