jasonshaoshun committed · Commit 00daaaf · 1 Parent: 63ac8f7

debug

app.py CHANGED
@@ -262,30 +262,30 @@ LEADERBOARD_DF_MIB_CAUSALGRAPH_DETAILED, LEADERBOARD_DF_MIB_CAUSALGRAPH_AGGREGAT
(the removed lines of this hunk did not survive the page extraction)
+def init_leaderboard_mib_subgraph(dataframe, track):
+    # print(f"init_leaderboard_mib: dataframe head before loc is {dataframe.head()}\n")

+    if dataframe is None or dataframe.empty:
+        raise ValueError("Leaderboard DataFrame is empty or None.")

+    # filter for correct track
+    # dataframe = dataframe.loc[dataframe["Track"] == track]

+    # print(f"init_leaderboard_mib: dataframe head after loc is {dataframe.head()}\n")

+    return Leaderboard(
+        value=dataframe,
+        datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
+        select_columns=SelectColumns(
+            default_selection=[c.name for c in fields(AutoEvalColumn_mib_subgraph) if c.displayed_by_default],
+            cant_deselect=[c.name for c in fields(AutoEvalColumn_mib_subgraph) if c.never_hidden],
+            label="Select Columns to Display:",
+        ),
+        search_columns=["Method"],  # Changed from AutoEvalColumn_mib_subgraph.model.name to "Method"
+        hide_columns=[c.name for c in fields(AutoEvalColumn_mib_subgraph) if c.hidden],
+        bool_checkboxgroup_label="Hide models",
+        interactive=False,
+    )
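For orientation, a hedged sketch of how a helper like the one added above is typically mounted in a Space's Gradio UI. The tab layout, page title, and the LEADERBOARD_DF_MIB_SUBGRAPH DataFrame below are illustrative assumptions, not code from this commit.

    # Hypothetical wiring; assumes app.py already defines
    # init_leaderboard_mib_subgraph and a LEADERBOARD_DF_MIB_SUBGRAPH DataFrame.
    import gradio as gr

    demo = gr.Blocks()
    with demo:
        gr.HTML("<h1>MIB Leaderboard</h1>")  # placeholder title
        with gr.Tabs():
            with gr.TabItem("Subgraph Track"):
                # Renders the DataFrame with the column-selection, search, and
                # hide controls configured inside init_leaderboard_mib_subgraph.
                leaderboard = init_leaderboard_mib_subgraph(
                    LEADERBOARD_DF_MIB_SUBGRAPH, "Subgraph"
                )

    demo.launch()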
@@ -351,70 +351,72 @@ LEADERBOARD_DF_MIB_CAUSALGRAPH_DETAILED, LEADERBOARD_DF_MIB_CAUSALGRAPH_AGGREGAT
 # )

-def init_leaderboard_mib_subgraph(dataframe, track):
-    """Initialize the subgraph leaderboard with display names for better readability."""
-    if dataframe is None or dataframe.empty:
-        raise ValueError("Leaderboard DataFrame is empty or None.")
(the remaining removed lines of this hunk did not survive the page extraction)

+
+# def init_leaderboard_mib_subgraph(dataframe, track):
+#     """Initialize the subgraph leaderboard with display names for better readability."""
+#     if dataframe is None or dataframe.empty:
+#         raise ValueError("Leaderboard DataFrame is empty or None.")
+
+#     print("\nDebugging DataFrame columns:", dataframe.columns.tolist())

+#     # First, create our display name mapping
+#     # This is like creating a translation dictionary between internal names and display names
+#     display_mapping = {}
+#     for task in TasksMib_Subgraph:
+#         for model in task.value.models:
+#             field_name = f"{task.value.benchmark}_{model}"
+#             display_name = f"{task.value.benchmark}({model})"
+#             display_mapping[field_name] = display_name
+
+#     # Now when creating benchmark groups, we'll use display names
+#     benchmark_groups = []
+#     for task in TasksMib_Subgraph:
+#         benchmark = task.value.benchmark
+#         benchmark_cols = [
+#             display_mapping[f"{benchmark}_{model}"]  # Use display name from our mapping
+#             for model in task.value.models
+#             if f"{benchmark}_{model}" in dataframe.columns
+#         ]
+#         if benchmark_cols:
+#             benchmark_groups.append(benchmark_cols)
+#             print(f"\nBenchmark group for {benchmark}:", benchmark_cols)
+
+#     # Similarly for model groups
+#     model_groups = []
+#     all_models = list(set(model for task in TasksMib_Subgraph for model in task.value.models))

+#     for model in all_models:
+#         model_cols = [
+#             display_mapping[f"{task.value.benchmark}_{model}"]  # Use display name
+#             for task in TasksMib_Subgraph
+#             if model in task.value.models
+#             and f"{task.value.benchmark}_{model}" in dataframe.columns
+#         ]
+#         if model_cols:
+#             model_groups.append(model_cols)
+#             print(f"\nModel group for {model}:", model_cols)
+
+#     # Combine all groups using display names
+#     all_groups = benchmark_groups + model_groups
+#     all_columns = [col for group in all_groups for col in group]
+
+#     # Important: We need to rename our DataFrame columns to match display names
+
+#     renamed_df = dataframe.rename(columns=display_mapping)
+
+#     # Original code
+#     return Leaderboard(
+#         value=renamed_df,  # Use DataFrame with display names
+#         datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
+#         select_columns=SelectColumns(
+#             default_selection=all_columns,  # Now contains display names
+#             label="Select Results:"
+#         ),
+#         search_columns=["Method"],
+#         hide_columns=[],
+#         interactive=False,
+#     )
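The commented-out variant preserved above maps internal column names of the form benchmark_model to display names of the form benchmark(model), then renames the DataFrame columns accordingly. Below is a minimal, self-contained sketch of that renaming idea; the toy task enum and DataFrame are stand-ins for the Space's real TasksMib_Subgraph and leaderboard data.

    # Sketch of the display-name mapping idea; toy data, not the Space's real objects.
    from dataclasses import dataclass
    from enum import Enum
    import pandas as pd


    @dataclass
    class TaskInfo:
        benchmark: str
        models: list


    class ToyTasks(Enum):  # stand-in for TasksMib_Subgraph
        ioi = TaskInfo("ioi", ["gpt2", "qwen2_5"])
        mcqa = TaskInfo("mcqa", ["qwen2_5", "gemma2"])


    df = pd.DataFrame({
        "Method": ["A", "B"],
        "ioi_gpt2": [0.71, 0.64],
        "mcqa_qwen2_5": [0.55, 0.60],
    })

    # internal name "benchmark_model" -> display name "benchmark(model)"
    display_mapping = {
        f"{task.value.benchmark}_{model}": f"{task.value.benchmark}({model})"
        for task in ToyTasks
        for model in task.value.models
    }

    renamed_df = df.rename(columns=display_mapping)
    print(renamed_df.columns.tolist())
    # ['Method', 'ioi(gpt2)', 'mcqa(qwen2_5)']

Columns absent from the DataFrame are simply left untouched by rename, which is why the original code also checks membership in dataframe.columns before adding a name to a group.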