Commit dd7b655 · jasonshaoshun committed · parent: 49218db
debug
app.py CHANGED
@@ -189,39 +189,41 @@ from src.about import TasksMib_Subgraph
 
 
 def init_leaderboard_mib_subgraph(dataframe, track):
-    """Initialize the subgraph leaderboard with
+    """Initialize the subgraph leaderboard with display names for better readability."""
     if dataframe is None or dataframe.empty:
         raise ValueError("Leaderboard DataFrame is empty or None.")
 
     print("\nDebugging DataFrame columns:", dataframe.columns.tolist())
 
-    # First, create
-
-
-
-
-
-
+    # First, create our display name mapping
+    # This is like creating a translation dictionary between internal names and display names
+    display_mapping = {}
+    for task in TasksMib_Subgraph:
+        for model in task.value.models:
+            field_name = f"{task.value.benchmark}_{model}"
+            display_name = f"{task.value.benchmark}({model})"
+            display_mapping[field_name] = display_name
+
+    # Now when creating benchmark groups, we'll use display names
     benchmark_groups = []
     for task in TasksMib_Subgraph:
         benchmark = task.value.benchmark
-        # Get all valid columns for this benchmark's models, using display names
         benchmark_cols = [
-            f"{benchmark}_{model}"
+            display_mapping[f"{benchmark}_{model}"]  # Use display name from our mapping
             for model in task.value.models
-            if f"{benchmark}_{model}" in dataframe.columns
+            if f"{benchmark}_{model}" in dataframe.columns
         ]
         if benchmark_cols:
             benchmark_groups.append(benchmark_cols)
             print(f"\nBenchmark group for {benchmark}:", benchmark_cols)
 
-    #
+    # Similarly for model groups
     model_groups = []
     all_models = list(set(model for task in TasksMib_Subgraph for model in task.value.models))
 
     for model in all_models:
         model_cols = [
-            f"{task.value.benchmark}_{model}"
+            display_mapping[f"{task.value.benchmark}_{model}"]  # Use display name
             for task in TasksMib_Subgraph
             if model in task.value.models
             and f"{task.value.benchmark}_{model}" in dataframe.columns
@@ -230,33 +232,25 @@ def init_leaderboard_mib_subgraph(dataframe, track):
             model_groups.append(model_cols)
             print(f"\nModel group for {model}:", model_cols)
 
-    # Combine
+    # Combine all groups using display names
     all_groups = benchmark_groups + model_groups
     all_columns = [col for group in all_groups for col in group]
-
-    # Important: We need to rename
-    display_name_mapping = {
-        f"{task.value.benchmark}_{model}": f"{task.value.benchmark}({model})"
-        for task in TasksMib_Subgraph
-        for model in task.value.models
-        if f"{task.value.benchmark}_{model}" in dataframe.columns
-    }
-
-    # Create a copy of the DataFrame with renamed columns
-    display_df = dataframe.rename(columns=display_name_mapping)
+
+    # Important: We need to rename our DataFrame columns to match display names
+    renamed_df = dataframe.rename(columns=display_mapping)
 
     return Leaderboard(
-        value=display_df,
+        value=renamed_df,  # Use DataFrame with display names
         datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
         select_columns=SelectColumns(
-            default_selection=all_columns,
+            default_selection=all_columns,  # Now contains display names
             label="Select Results:"
         ),
         search_columns=["Method"],
         hide_columns=[],
         interactive=False,
     )
-
+
 
 
 # def init_leaderboard_mib_subgraph(dataframe, track):
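Taken together, the commit replaces the late dict-comprehension rename with a single display_mapping built up front and reused everywhere. Below is a minimal, self-contained sketch of that pattern; the Tasks enum, benchmark and model names, methods, and scores are hypothetical stand-ins for TasksMib_Subgraph and the real leaderboard data, and only plain pandas is assumed rather than the actual Leaderboard widget.

    from dataclasses import dataclass
    from enum import Enum

    import pandas as pd


    @dataclass
    class TaskInfo:
        benchmark: str
        models: list


    class Tasks(Enum):  # hypothetical stand-in for TasksMib_Subgraph
        IOI = TaskInfo("ioi", ["gpt2", "qwen2_5"])
        MCQA = TaskInfo("mcqa", ["gpt2", "llama3"])


    df = pd.DataFrame({
        "Method": ["EAP", "ACDC"],   # invented methods/scores, for shape only
        "ioi_gpt2": [0.71, 0.64],
        "mcqa_llama3": [0.55, 0.58],
    })

    # One mapping from internal names to display names, built once and reused
    # for both the DataFrame rename and the selection lists.
    display_mapping = {
        f"{t.value.benchmark}_{m}": f"{t.value.benchmark}({m})"
        for t in Tasks
        for m in t.value.models
        if f"{t.value.benchmark}_{m}" in df.columns
    }

    renamed_df = df.rename(columns=display_mapping)
    default_selection = list(display_mapping.values())

    # The invariant the commit restores: every default-selected column name
    # must exist verbatim in the DataFrame handed to the widget.
    assert set(default_selection) <= set(renamed_df.columns)
    print(default_selection)  # ['ioi(gpt2)', 'mcqa(llama3)']

Deriving default_selection and the renamed DataFrame from the same mapping means the two can no longer drift apart; the removed version appears to have built its group lists from raw benchmark_model field names while handing the widget a renamed DataFrame, which is exactly the kind of mismatch the debug prints are chasing.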