jasonshaoshun committed
Commit 2797503 · 1 Parent(s): 59781bb
Files changed (1): app.py (+53/-54)
app.py CHANGED
@@ -366,65 +366,64 @@ def init_leaderboard_mib_subgraph(dataframe, track):
     # Important: We need to rename our DataFrame columns to match display names
     renamed_df = dataframe.rename(columns=display_mapping)
 
-    return Leaderboard(
-        value=renamed_df,  # Use DataFrame with display names
-        datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
-        select_columns=SelectColumns(
-            default_selection=all_columns,  # Now contains display names
-            label="Select Results:"
-        ),
-        search_columns=["Method"],
-        hide_columns=[],
-        interactive=False,
-    )
+    # return Leaderboard(
+    #     value=renamed_df,  # Use DataFrame with display names
+    #     datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
+    #     select_columns=SelectColumns(
+    #         default_selection=all_columns,  # Now contains display names
+    #         label="Select Results:"
+    #     ),
+    #     search_columns=["Method"],
+    #     hide_columns=[],
+    #     interactive=False,
+    # )
 
-
-    # # Complete column groups for both benchmarks and models
-    # # Define keywords for filtering
-    # benchmark_keywords = ["ioi", "mcqa", "arithmetic_addition", "arithmetic_subtraction", "arc_easy", "arc_challenge"]
-    # model_keywords = ["qwen2_5", "gpt2", "gemma2", "llama3"]
-
-    # # # Optional: Define display names
-    # # mappings = {
-    # #     "ioi_llama3": "IOI (LLaMA-3)",
-    # #     "ioi_qwen2_5": "IOI (Qwen-2.5)",
-    # #     "ioi_gpt2": "IOI (GPT-2)",
-    # #     "ioi_gemma2": "IOI (Gemma-2)",
-    # #     "mcqa_llama3": "MCQA (LLaMA-3)",
-    # #     "mcqa_qwen2_5": "MCQA (Qwen-2.5)",
-    # #     "mcqa_gemma2": "MCQA (Gemma-2)",
-    # #     "arithmetic_addition_llama3": "Arithmetic Addition (LLaMA-3)",
-    # #     "arithmetic_subtraction_llama3": "Arithmetic Subtraction (LLaMA-3)",
-    # #     "arc_easy_llama3": "ARC Easy (LLaMA-3)",
-    # #     "arc_easy_gemma2": "ARC Easy (Gemma-2)",
-    # #     "arc_challenge_llama3": "ARC Challenge (LLaMA-3)",
-    # #     "eval_name": "Evaluation Name",
-    # #     "Method": "Method",
-    # #     "Average": "Average Score"
-    # # }
+    # Complete column groups for both benchmarks and models
+    # Define keywords for filtering
+    benchmark_keywords = ["ioi", "mcqa", "arithmetic_addition", "arithmetic_subtraction", "arc_easy", "arc_challenge"]
+    model_keywords = ["qwen2_5", "gpt2", "gemma2", "llama3"]
+
+    # Optional: Define display names
+    mappings = {
+        "ioi_llama3": "IOI (LLaMA-3)",
+        "ioi_qwen2_5": "IOI (Qwen-2.5)",
+        "ioi_gpt2": "IOI (GPT-2)",
+        "ioi_gemma2": "IOI (Gemma-2)",
+        "mcqa_llama3": "MCQA (LLaMA-3)",
+        "mcqa_qwen2_5": "MCQA (Qwen-2.5)",
+        "mcqa_gemma2": "MCQA (Gemma-2)",
+        "arithmetic_addition_llama3": "Arithmetic Addition (LLaMA-3)",
+        "arithmetic_subtraction_llama3": "Arithmetic Subtraction (LLaMA-3)",
+        "arc_easy_llama3": "ARC Easy (LLaMA-3)",
+        "arc_easy_gemma2": "ARC Easy (Gemma-2)",
+        "arc_challenge_llama3": "ARC Challenge (LLaMA-3)",
+        "eval_name": "Evaluation Name",
+        "Method": "Method",
+        "Average": "Average Score"
+    }
 
     # mappings = {}
 
-    # # Create SmartSelectColumns instance
-    # smart_columns = SmartSelectColumns(
-    #     benchmark_keywords=benchmark_keywords,
-    #     model_keywords=model_keywords,
-    #     column_mapping=mappings,
-    #     initial_selected=["Method", "Average"]
-    # )
+    # Create SmartSelectColumns instance
+    smart_columns = SmartSelectColumns(
+        benchmark_keywords=benchmark_keywords,
+        model_keywords=model_keywords,
+        column_mapping=mappings,
+        initial_selected=["Method", "Average"]
+    )
 
-    # print("\nDebugging DataFrame columns:", renamed_df.columns.tolist())
+    print("\nDebugging DataFrame columns:", renamed_df.columns.tolist())
 
-    # # Create Leaderboard
-    # leaderboard = Leaderboard(
-    #     value=renamed_df,
-    #     datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
-    #     select_columns=smart_columns,
-    #     search_columns=["Method"],
-    #     hide_columns=[],
-    #     interactive=False
-    # )
-    # print(f"Successfully created leaderboard.")
-    # return leaderboard
+    # Create Leaderboard
+    leaderboard = Leaderboard(
+        value=renamed_df,
+        datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
+        select_columns=smart_columns,
+        search_columns=["Method"],
+        hide_columns=[],
+        interactive=False
+    )
+    print(f"Successfully created leaderboard.")
+    return leaderboard
 
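Net effect of the commit: the plain SelectColumns leaderboard is commented out and the keyword-driven SmartSelectColumns path goes live. SmartSelectColumns itself is not part of this diff; the sketch below shows one plausible shape for it, assuming it subclasses gradio_leaderboard's SelectColumns (which the old code already imports) and matching only the constructor signature visible above. The group_columns helper and all internals here are illustrative assumptions, not the repository's actual implementation.

from dataclasses import dataclass, field
from gradio_leaderboard import SelectColumns

@dataclass
class SmartSelectColumns(SelectColumns):
    """Sketch: a column selector that buckets leaderboard columns
    by benchmark and model keywords (hypothetical implementation)."""
    benchmark_keywords: list = field(default_factory=list)
    model_keywords: list = field(default_factory=list)
    column_mapping: dict = field(default_factory=dict)
    initial_selected: list = field(default_factory=list)

    def __post_init__(self):
        # Open the widget with only the always-on columns selected,
        # e.g. ["Method", "Average"] as passed in the diff above.
        self.default_selection = list(self.initial_selected)

    def group_columns(self, columns):
        """Map each keyword to the (renamed) columns containing it,
        e.g. "ioi" -> ["IOI (LLaMA-3)", "IOI (Qwen-2.5)", ...]."""
        groups = {}
        for keyword in self.benchmark_keywords + self.model_keywords:
            matched = [c for c in columns if keyword in c]
            groups[keyword] = [self.column_mapping.get(c, c) for c in matched]
        return groups

With a definition along these lines, the new code path (select_columns=smart_columns) would hand Leaderboard a selector that already carries the benchmark/model grouping derived from the raw column names, which is presumably why the commit moves the keyword lists and display-name mapping out of comments and into live code.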