jasonshaoshun committed
Commit 59781bb · 1 Parent(s): 9810745
debug

app.py CHANGED
@@ -366,63 +366,65 @@ def init_leaderboard_mib_subgraph(dataframe, track):
     # Important: We need to rename our DataFrame columns to match display names
     renamed_df = dataframe.rename(columns=display_mapping)
 
-
-
-    # datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
-    # select_columns=SelectColumns(
-    #     default_selection=all_columns,  # Now contains display names
-    #     label="Select Results:"
-    # ),
-    # search_columns=["Method"],
-    # hide_columns=[],
-    # interactive=False,
-    # )
-    # Complete column groups for both benchmarks and models
-    # Define keywords for filtering
-    benchmark_keywords = ["ioi", "mcqa", "arithmetic_addition", "arithmetic_subtraction", "arc_easy", "arc_challenge"]
-    model_keywords = ["qwen2_5", "gpt2", "gemma2", "llama3"]
-
-    # # Optional: Define display names
-    # mappings = {
-    #     "ioi_llama3": "IOI (LLaMA-3)",
-    #     "ioi_qwen2_5": "IOI (Qwen-2.5)",
-    #     "ioi_gpt2": "IOI (GPT-2)",
-    #     "ioi_gemma2": "IOI (Gemma-2)",
-    #     "mcqa_llama3": "MCQA (LLaMA-3)",
-    #     "mcqa_qwen2_5": "MCQA (Qwen-2.5)",
-    #     "mcqa_gemma2": "MCQA (Gemma-2)",
-    #     "arithmetic_addition_llama3": "Arithmetic Addition (LLaMA-3)",
-    #     "arithmetic_subtraction_llama3": "Arithmetic Subtraction (LLaMA-3)",
-    #     "arc_easy_llama3": "ARC Easy (LLaMA-3)",
-    #     "arc_easy_gemma2": "ARC Easy (Gemma-2)",
-    #     "arc_challenge_llama3": "ARC Challenge (LLaMA-3)",
-    #     "eval_name": "Evaluation Name",
-    #     "Method": "Method",
-    #     "Average": "Average Score"
-    # }
-    mappings = {}
-
-    # Create SmartSelectColumns instance
-    smart_columns = SmartSelectColumns(
-        benchmark_keywords=benchmark_keywords,
-        model_keywords=model_keywords,
-        column_mapping=mappings,
-        initial_selected=["Method", "Average"]
-    )
-
-    print("\nDebugging DataFrame columns:", renamed_df.columns.tolist())
-
-    # Create Leaderboard
-    leaderboard = Leaderboard(
-        value=renamed_df,
+    return Leaderboard(
+        value=renamed_df,  # Use DataFrame with display names
         datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
-        select_columns=
+        select_columns=SelectColumns(
+            default_selection=all_columns,  # Now contains display names
+            label="Select Results:"
+        ),
         search_columns=["Method"],
         hide_columns=[],
-        interactive=False
+        interactive=False,
     )
-
-
+
+
+    # # Complete column groups for both benchmarks and models
+    # # Define keywords for filtering
+    # benchmark_keywords = ["ioi", "mcqa", "arithmetic_addition", "arithmetic_subtraction", "arc_easy", "arc_challenge"]
+    # model_keywords = ["qwen2_5", "gpt2", "gemma2", "llama3"]
+
+    # # # Optional: Define display names
+    # # mappings = {
+    # #     "ioi_llama3": "IOI (LLaMA-3)",
+    # #     "ioi_qwen2_5": "IOI (Qwen-2.5)",
+    # #     "ioi_gpt2": "IOI (GPT-2)",
+    # #     "ioi_gemma2": "IOI (Gemma-2)",
+    # #     "mcqa_llama3": "MCQA (LLaMA-3)",
+    # #     "mcqa_qwen2_5": "MCQA (Qwen-2.5)",
+    # #     "mcqa_gemma2": "MCQA (Gemma-2)",
+    # #     "arithmetic_addition_llama3": "Arithmetic Addition (LLaMA-3)",
+    # #     "arithmetic_subtraction_llama3": "Arithmetic Subtraction (LLaMA-3)",
+    # #     "arc_easy_llama3": "ARC Easy (LLaMA-3)",
+    # #     "arc_easy_gemma2": "ARC Easy (Gemma-2)",
+    # #     "arc_challenge_llama3": "ARC Challenge (LLaMA-3)",
+    # #     "eval_name": "Evaluation Name",
+    # #     "Method": "Method",
+    # #     "Average": "Average Score"
+    # # }
+    # mappings = {}
+
+    # # Create SmartSelectColumns instance
+    # smart_columns = SmartSelectColumns(
+    #     benchmark_keywords=benchmark_keywords,
+    #     model_keywords=model_keywords,
+    #     column_mapping=mappings,
+    #     initial_selected=["Method", "Average"]
+    # )
+
+    # print("\nDebugging DataFrame columns:", renamed_df.columns.tolist())
+
+    # # Create Leaderboard
+    # leaderboard = Leaderboard(
+    #     value=renamed_df,
+    #     datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
+    #     select_columns=smart_columns,
+    #     search_columns=["Method"],
+    #     hide_columns=[],
+    #     interactive=False
+    # )
+    # print(f"Successfully created leaderboard.")
+    # return leaderboard
 
 
 
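For reference, here is a minimal, runnable sketch of the pattern this commit settles on: rename the DataFrame to display names first, then hand the renamed frame straight to Leaderboard with a plain SelectColumns selector. It assumes Leaderboard and SelectColumns come from the gradio_leaderboard package (the component standard Hugging Face leaderboard Spaces use); the Column class below is a hypothetical stand-in for the Space's real AutoEvalColumn_mib_subgraph metadata and fields() helper, which this hunk does not show.

import gradio as gr
import pandas as pd
from gradio_leaderboard import Leaderboard, SelectColumns  # assumed source of both classes

# Hypothetical stand-in for the Space's column metadata
# (AutoEvalColumn_mib_subgraph + fields() in the real app.py).
class Column:
    def __init__(self, name, display_name, type):
        self.name, self.display_name, self.type = name, display_name, type

COLUMNS = [
    Column("Method", "Method", "markdown"),
    Column("ioi_llama3", "IOI (LLaMA-3)", "number"),
    Column("Average", "Average Score", "number"),
]

def init_leaderboard_mib_subgraph(dataframe, track):
    # Map internal column names to display names, as in the diff.
    display_mapping = {c.name: c.display_name for c in COLUMNS}
    all_columns = [c.display_name for c in COLUMNS]

    # Rename the DataFrame columns to match display names, so that
    # default_selection (display names) lines up with the frame.
    renamed_df = dataframe.rename(columns=display_mapping)

    return Leaderboard(
        value=renamed_df,  # DataFrame already carries display names
        datatype=[c.type for c in COLUMNS],
        select_columns=SelectColumns(
            default_selection=all_columns,  # display names, post-rename
            label="Select Results:",
        ),
        search_columns=["Method"],  # free-text search over the Method column
        hide_columns=[],
        interactive=False,  # read-only results table
    )

# Usage: mount the leaderboard inside a Blocks app.
demo_df = pd.DataFrame({"Method": ["EAP"], "ioi_llama3": [0.42], "Average": [0.42]})
with gr.Blocks() as demo:
    init_leaderboard_mib_subgraph(demo_df, track="subgraph")

if __name__ == "__main__":
    demo.launch()

The design point visible in the diff: the commented-out SmartSelectColumns path grouped columns by benchmark/model keywords, while the version committed here returns to the stock SelectColumns, trading grouped filtering for a selector that works while debugging (per the commit message).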