jasonshaoshun committed
Commit c50d688 · 1 Parent(s): 7d21286
Files changed (1)
  1. app.py +34 -40
app.py CHANGED
@@ -454,68 +454,59 @@ from src.about import TasksMib_Subgraph
 
 
 
-
 def init_leaderboard_mib_subgraph(dataframe, track):
-    """Initialize the subgraph leaderboard with benchmark and model filtering using direct enum access."""
+    """Initialize the subgraph leaderboard with grouped column selection by benchmark."""
     if dataframe is None or dataframe.empty:
         raise ValueError("Leaderboard DataFrame is empty or None.")
-
+
     print("\nDebugging DataFrame columns:", dataframe.columns.tolist())
 
-    # Get benchmarks directly from TasksMib_Subgraph
-    benchmarks = [task.value.benchmark for task in TasksMib_Subgraph]
-    print("\nBenchmarks from enum:", benchmarks)
+    # Create groups of columns by benchmark
+    benchmark_groups = []
 
-    # Get unique models from all tasks
-    models = list(set(
-        model # Get each model
-        for task in TasksMib_Subgraph # For each task
-        for model in task.value.models # Get all its models
-    ))
-    print("\nModels from enum:", models)
-
-    # Create benchmark selections - map each benchmark to its columns
-    benchmark_selections = {}
+    # For each benchmark in our TasksMib_Subgraph enum...
     for task in TasksMib_Subgraph:
         benchmark = task.value.benchmark
-        # For this benchmark, get all its valid model combinations
-        valid_columns = [
+        # Get all valid columns for this benchmark's models
+        benchmark_cols = [
            f"{benchmark}_{model}"
            for model in task.value.models
            if f"{benchmark}_{model}" in dataframe.columns
        ]
-        benchmark_selections[benchmark] = valid_columns
-        print(f"\nBenchmark {benchmark} maps to columns:", valid_columns)
-
-    # Create model selections - map each model to its columns
-    model_selections = {}
-    for model in models:
-        # For this model, find all benchmarks where it's used
-        valid_columns = [
+        if benchmark_cols: # Only add if we have valid columns
+            benchmark_groups.append(benchmark_cols)
+            print(f"\nBenchmark group for {benchmark}:", benchmark_cols)
+
+    # Create model groups as well
+    model_groups = []
+    all_models = list(set(model for task in TasksMib_Subgraph for model in task.value.models))
+
+    # For each unique model...
+    for model in all_models:
+        # Get all valid columns for this model across benchmarks
+        model_cols = [
            f"{task.value.benchmark}_{model}"
            for task in TasksMib_Subgraph
            if model in task.value.models
            and f"{task.value.benchmark}_{model}" in dataframe.columns
        ]
-        model_selections[model] = valid_columns
-        print(f"\nModel {model} maps to columns:", valid_columns)
-
-    # Combine all selections
-    selection_groups = {
-        **benchmark_selections,
-        **model_selections
-    }
-
-    # Get the final selection options
-    selection_options = list(selection_groups.keys())
-    print("\nFinal selection options:", selection_options)
+        if model_cols: # Only add if we have valid columns
+            model_groups.append(model_cols)
+            print(f"\nModel group for {model}:", model_cols)
+
+    # Combine all groups
+    all_groups = benchmark_groups + model_groups
 
+    # Flatten groups for default selection (show everything initially)
+    all_columns = [col for group in all_groups for col in group]
+    print("\nAll available columns:", all_columns)
+
     return Leaderboard(
        value=dataframe,
        datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
        select_columns=SelectColumns(
-            default_selection=selection_options,
-            label="Filter by Benchmark or Model:"
+            default_selection=all_columns, # Show all columns initially
+            label="Select Results:"
        ),
        search_columns=["Method"],
        hide_columns=[],
@@ -526,6 +517,9 @@ def init_leaderboard_mib_subgraph(dataframe, track):
 
 
 
+
+
+
 def init_leaderboard_mib_causalgraph(dataframe, track):
    # print("Debugging column issues:")
    # print("\nActual DataFrame columns:")