jasonshaoshun committed · Commit d9fb05e · 1 Parent(s): 200beb2

debug
app.py CHANGED
@@ -54,25 +54,28 @@ from dataclasses import fields
 
 class SmartSelectColumns(SelectColumns):
     """
-    Enhanced SelectColumns component for gradio_leaderboard with
+    Enhanced SelectColumns component for gradio_leaderboard with dynamic column filtering.
     """
     def __init__(
         self,
-
+        benchmark_keywords: Optional[List[str]] = None,
+        model_keywords: Optional[List[str]] = None,
         column_mapping: Optional[Dict[str, str]] = None,
         initial_selected: Optional[List[str]] = None,
         **kwargs
     ):
         """
-        Initialize SmartSelectColumns with
+        Initialize SmartSelectColumns with dynamic filtering.
 
         Args:
-
+            benchmark_keywords: List of benchmark names to filter by (e.g., ["ioi", "mcqa"])
+            model_keywords: List of model names to filter by (e.g., ["llama3", "qwen2_5"])
             column_mapping: Dict mapping actual column names to display names
             initial_selected: List of columns to show initially
         """
         super().__init__(**kwargs)
-        self.
+        self.benchmark_keywords = benchmark_keywords or []
+        self.model_keywords = model_keywords or []
         self.column_mapping = column_mapping or {}
         self.reverse_mapping = {v: k for k, v in self.column_mapping.items()} if column_mapping else {}
         self.initial_selected = initial_selected or []
@@ -85,6 +88,40 @@ class SmartSelectColumns(SelectColumns):
         """Transform actual column names to display names."""
         return [self.column_mapping.get(col, col) for col in y]
 
+    def get_filtered_groups(self, df: pd.DataFrame) -> Dict[str, List[str]]:
+        """
+        Dynamically create column groups based on keywords.
+        """
+        filtered_groups = {}
+
+        # Create benchmark groups
+        for benchmark in self.benchmark_keywords:
+            matching_cols = [
+                col for col in df.columns
+                if benchmark in col.lower()
+            ]
+            if matching_cols:
+                group_name = f"Benchmark group for {benchmark}"
+                filtered_groups[group_name] = [
+                    self.column_mapping.get(col, col)
+                    for col in matching_cols
+                ]
+
+        # Create model groups
+        for model in self.model_keywords:
+            matching_cols = [
+                col for col in df.columns
+                if model in col.lower()
+            ]
+            if matching_cols:
+                group_name = f"Model group for {model}"
+                filtered_groups[group_name] = [
+                    self.column_mapping.get(col, col)
+                    for col in matching_cols
+                ]
+
+        return filtered_groups
+
     def update(
         self,
         value: Union[pd.DataFrame, Dict[str, List[str]], Any]
@@ -97,14 +134,8 @@ class SmartSelectColumns(SelectColumns):
         # Use initial selection or default columns
         selected = self.initial_selected if self.initial_selected else choices
 
-        #
-        filtered_cols =
-        for group_name, columns in self.column_groups.items():
-            filtered_cols[group_name] = [
-                self.column_mapping.get(col, col)
-                for col in columns
-                if col in value.columns
-            ]
+        # Get dynamically filtered groups
+        filtered_cols = self.get_filtered_groups(value)
 
         return {
             "choices": choices,
@@ -134,7 +165,6 @@ class SmartSelectColumns(SelectColumns):
 
 
 
-
 def restart_space():
     API.restart_space(repo_id=REPO_ID)
 
@@ -348,52 +378,24 @@ def init_leaderboard_mib_subgraph(dataframe, track):
     # interactive=False,
     # )
     # Complete column groups for both benchmarks and models
-
-
-
-        "Benchmark group for mcqa": ["mcqa_qwen2_5", "mcqa_gemma2", "mcqa_llama3"],
-        "Benchmark group for arithmetic_addition": ["arithmetic_addition_llama3"],
-        "Benchmark group for arithmetic_subtraction": ["arithmetic_subtraction_llama3"],
-        "Benchmark group for arc_easy": ["arc_easy_gemma2", "arc_easy_llama3"],
-        "Benchmark group for arc_challenge": ["arc_challenge_llama3"],
-
-        # Model groups
-        "Model group for qwen2_5": ["ioi_qwen2_5", "mcqa_qwen2_5"],
-        "Model group for gpt2": ["ioi_gpt2"],
-        "Model group for gemma2": ["ioi_gemma2", "mcqa_gemma2", "arc_easy_gemma2"],
-        "Model group for llama3": [
-            "ioi_llama3",
-            "mcqa_llama3",
-            "arithmetic_addition_llama3",
-            "arithmetic_subtraction_llama3",
-            "arc_easy_llama3",
-            "arc_challenge_llama3"
-        ]
-    }
+    # Define keywords for filtering
+    benchmark_keywords = ["ioi", "mcqa", "arithmetic_addition", "arithmetic_subtraction", "arc_easy", "arc_challenge"]
+    model_keywords = ["qwen2_5", "gpt2", "gemma2", "llama3"]
 
-    # #
+    # # Optional: Define display names
     # mappings = {
-    # # IOI benchmark mappings
     # "ioi_llama3": "IOI (LLaMA-3)",
     # "ioi_qwen2_5": "IOI (Qwen-2.5)",
     # "ioi_gpt2": "IOI (GPT-2)",
     # "ioi_gemma2": "IOI (Gemma-2)",
-
-    # # MCQA benchmark mappings
     # "mcqa_llama3": "MCQA (LLaMA-3)",
    # "mcqa_qwen2_5": "MCQA (Qwen-2.5)",
     # "mcqa_gemma2": "MCQA (Gemma-2)",
-
-    # # Arithmetic benchmark mappings
     # "arithmetic_addition_llama3": "Arithmetic Addition (LLaMA-3)",
     # "arithmetic_subtraction_llama3": "Arithmetic Subtraction (LLaMA-3)",
-
-    # # ARC benchmark mappings
     # "arc_easy_llama3": "ARC Easy (LLaMA-3)",
     # "arc_easy_gemma2": "ARC Easy (Gemma-2)",
     # "arc_challenge_llama3": "ARC Challenge (LLaMA-3)",
-
-    # # Other columns
     # "eval_name": "Evaluation Name",
     # "Method": "Method",
     # "Average": "Average Score"
@@ -402,12 +404,13 @@ def init_leaderboard_mib_subgraph(dataframe, track):
 
     # Create SmartSelectColumns instance
     smart_columns = SmartSelectColumns(
-
+        benchmark_keywords=benchmark_keywords,
+        model_keywords=model_keywords,
        column_mapping=mappings,
        initial_selected=["Method", "Average"]
     )
 
-    # Create Leaderboard
+    # Create Leaderboard
     leaderboard = Leaderboard(
         value=renamed_df,
         datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
@@ -422,6 +425,8 @@ def init_leaderboard_mib_subgraph(dataframe, track):
 
 
 
+
+
 # def init_leaderboard_mib_subgraph(dataframe, track):
 #     """Initialize the subgraph leaderboard with group-based column selection."""
 #     if dataframe is None or dataframe.empty:
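For context, the commit replaces the hard-coded column_groups dict with keyword-based filtering: get_filtered_groups() scans the DataFrame's column names and builds "Benchmark group for ..." / "Model group for ..." entries on the fly. Below is a minimal standalone sketch of that grouping logic; the helper name filtered_groups and the toy DataFrame are illustrative only and are not part of the Space's app.py.

# Minimal sketch (not part of app.py) mirroring the get_filtered_groups() logic above.
from typing import Dict, List, Optional

import pandas as pd


def filtered_groups(
    df: pd.DataFrame,
    benchmark_keywords: List[str],
    model_keywords: List[str],
    column_mapping: Optional[Dict[str, str]] = None,
) -> Dict[str, List[str]]:
    """Build "Benchmark group for X" / "Model group for Y" entries from column names."""
    column_mapping = column_mapping or {}
    groups: Dict[str, List[str]] = {}
    for prefix, keywords in (
        ("Benchmark group for", benchmark_keywords),
        ("Model group for", model_keywords),
    ):
        for keyword in keywords:
            # Substring match against lower-cased column names, as in the diff.
            matching = [col for col in df.columns if keyword in col.lower()]
            if matching:
                groups[f"{prefix} {keyword}"] = [column_mapping.get(c, c) for c in matching]
    return groups


# Toy frame using column names that appear in the leaderboard diff.
df = pd.DataFrame(columns=["Method", "Average", "ioi_llama3", "mcqa_qwen2_5", "arc_easy_gemma2"])
print(filtered_groups(df, ["ioi", "mcqa", "arc_easy"], ["llama3", "qwen2_5", "gemma2"]))
# -> {'Benchmark group for ioi': ['ioi_llama3'],
#     'Benchmark group for mcqa': ['mcqa_qwen2_5'],
#     'Benchmark group for arc_easy': ['arc_easy_gemma2'],
#     'Model group for llama3': ['ioi_llama3'],
#     'Model group for qwen2_5': ['mcqa_qwen2_5'],
#     'Model group for gemma2': ['arc_easy_gemma2']}

As in the removed hard-coded dict, a single column can land in several groups (e.g., mcqa_llama3 falls in both the mcqa benchmark group and the llama3 model group); the keyword approach just derives those groups from whatever columns are present instead of listing them by hand.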