jasonshaoshun committed on
Commit cc32e03 · 1 Parent(s): 66f5701
Files changed (1)
  1. app.py +81 -81
app.py CHANGED
@@ -128,122 +128,122 @@ from src.about import TasksMib_Subgraph
 
 
 
-# def init_leaderboard_mib_subgraph(dataframe, track):
-#     """Initialize the subgraph leaderboard with grouped column selection by benchmark."""
-#     if dataframe is None or dataframe.empty:
-#         raise ValueError("Leaderboard DataFrame is empty or None.")
-
-#     print("\nDebugging DataFrame columns:", dataframe.columns.tolist())
-
-#     # Create groups of columns by benchmark
-#     benchmark_groups = []
-
-#     # For each benchmark in our TasksMib_Subgraph enum...
-#     for task in TasksMib_Subgraph:
-#         benchmark = task.value.benchmark
-#         # Get all valid columns for this benchmark's models
-#         benchmark_cols = [
-#             f"{benchmark}_{model}"
-#             for model in task.value.models
-#             if f"{benchmark}_{model}" in dataframe.columns
-#         ]
-#         if benchmark_cols: # Only add if we have valid columns
-#             benchmark_groups.append(benchmark_cols)
-#             print(f"\nBenchmark group for {benchmark}:", benchmark_cols)
-
-#     # Create model groups as well
-#     model_groups = []
-#     all_models = list(set(model for task in TasksMib_Subgraph for model in task.value.models))
-
-#     # For each unique model...
-#     for model in all_models:
-#         # Get all valid columns for this model across benchmarks
-#         model_cols = [
-#             f"{task.value.benchmark}_{model}"
-#             for task in TasksMib_Subgraph
-#             if model in task.value.models
-#             and f"{task.value.benchmark}_{model}" in dataframe.columns
-#         ]
-#         if model_cols: # Only add if we have valid columns
-#             model_groups.append(model_cols)
-#             print(f"\nModel group for {model}:", model_cols)
-
-#     # Combine all groups
-#     all_groups = benchmark_groups + model_groups
-
-#     # Flatten groups for default selection (show everything initially)
-#     all_columns = [col for group in all_groups for col in group]
-#     print("\nAll available columns:", all_columns)
-
-#     return Leaderboard(
-#         value=dataframe,
-#         datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
-#         select_columns=SelectColumns(
-#             default_selection=all_columns, # Show all columns initially
-#             label="Select Results:"
-#         ),
-#         search_columns=["Method"],
-#         hide_columns=[],
-#         interactive=False,
-#     )
-
 def init_leaderboard_mib_subgraph(dataframe, track):
-    """Initialize the subgraph leaderboard with group-based column selection."""
+    """Initialize the subgraph leaderboard with grouped column selection by benchmark."""
     if dataframe is None or dataframe.empty:
         raise ValueError("Leaderboard DataFrame is empty or None.")
 
     print("\nDebugging DataFrame columns:", dataframe.columns.tolist())
 
-    # Create selection mapping for benchmark groups
-    selection_mapping = {}
+    # Create groups of columns by benchmark
+    benchmark_groups = []
 
-    # Create benchmark groups with descriptive names
+    # For each benchmark in our TasksMib_Subgraph enum...
     for task in TasksMib_Subgraph:
         benchmark = task.value.benchmark
-        # Get all columns for this benchmark's models
+        # Get all valid columns for this benchmark's models
         benchmark_cols = [
             f"{benchmark}_{model}"
             for model in task.value.models
             if f"{benchmark}_{model}" in dataframe.columns
         ]
-        if benchmark_cols:
-            # Use a descriptive group name as the key
-            group_name = f"Benchmark: {benchmark.upper()}"
-            selection_mapping[group_name] = benchmark_cols
-            print(f"\n{group_name} maps to:", benchmark_cols)
+        if benchmark_cols: # Only add if we have valid columns
+            benchmark_groups.append(benchmark_cols)
+            print(f"\nBenchmark group for {benchmark}:", benchmark_cols)
 
-    # Create model groups with descriptive names
+    # Create model groups as well
+    model_groups = []
     all_models = list(set(model for task in TasksMib_Subgraph for model in task.value.models))
+
+    # For each unique model...
     for model in all_models:
-        # Get all columns for this model across benchmarks
+        # Get all valid columns for this model across benchmarks
         model_cols = [
            f"{task.value.benchmark}_{model}"
            for task in TasksMib_Subgraph
            if model in task.value.models
            and f"{task.value.benchmark}_{model}" in dataframe.columns
        ]
-        if model_cols:
-            # Use a descriptive group name as the key
-            group_name = f"Model: {model}"
-            selection_mapping[group_name] = model_cols
-            print(f"\n{group_name} maps to:", model_cols)
+        if model_cols: # Only add if we have valid columns
+            model_groups.append(model_cols)
+            print(f"\nModel group for {model}:", model_cols)
 
-    # The selection options are the group names
-    selection_options = list(selection_mapping.keys())
-    print("\nSelection options:", selection_options)
+    # Combine all groups
+    all_groups = benchmark_groups + model_groups
+
+    # Flatten groups for default selection (show everything initially)
+    all_columns = [col for group in all_groups for col in group]
+    print("\nAll available columns:", all_columns)
 
     return Leaderboard(
         value=dataframe,
         datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
         select_columns=SelectColumns(
-            default_selection=selection_options, # Show all groups by default
-            label="Select Benchmark or Model Groups:"
+            default_selection=all_columns, # Show all columns initially
+            label="Select Results:"
         ),
         search_columns=["Method"],
        hide_columns=[],
        interactive=False,
    )
 
+# def init_leaderboard_mib_subgraph(dataframe, track):
+#     """Initialize the subgraph leaderboard with group-based column selection."""
+#     if dataframe is None or dataframe.empty:
+#         raise ValueError("Leaderboard DataFrame is empty or None.")
+
+#     print("\nDebugging DataFrame columns:", dataframe.columns.tolist())
+
+#     # Create selection mapping for benchmark groups
+#     selection_mapping = {}
+
+#     # Create benchmark groups with descriptive names
+#     for task in TasksMib_Subgraph:
+#         benchmark = task.value.benchmark
+#         # Get all columns for this benchmark's models
+#         benchmark_cols = [
+#             f"{benchmark}_{model}"
+#             for model in task.value.models
+#             if f"{benchmark}_{model}" in dataframe.columns
+#         ]
+#         if benchmark_cols:
+#             # Use a descriptive group name as the key
+#             group_name = f"Benchmark: {benchmark.upper()}"
+#             selection_mapping[group_name] = benchmark_cols
+#             print(f"\n{group_name} maps to:", benchmark_cols)
+
+#     # Create model groups with descriptive names
+#     all_models = list(set(model for task in TasksMib_Subgraph for model in task.value.models))
+#     for model in all_models:
+#         # Get all columns for this model across benchmarks
+#         model_cols = [
+#             f"{task.value.benchmark}_{model}"
+#             for task in TasksMib_Subgraph
+#             if model in task.value.models
+#             and f"{task.value.benchmark}_{model}" in dataframe.columns
+#         ]
+#         if model_cols:
+#             # Use a descriptive group name as the key
+#             group_name = f"Model: {model}"
+#             selection_mapping[group_name] = model_cols
+#             print(f"\n{group_name} maps to:", model_cols)
+
+#     # The selection options are the group names
+#     selection_options = list(selection_mapping.keys())
+#     print("\nSelection options:", selection_options)
+
+#     return Leaderboard(
+#         value=dataframe,
+#         datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
+#         select_columns=SelectColumns(
+#             default_selection=selection_options, # Show all groups by default
+#             label="Select Benchmark or Model Groups:"
+#         ),
+#         search_columns=["Method"],
+#         hide_columns=[],
+#         interactive=False,
+#     )
+
 
 
 
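
For readers tracing the grouping logic in the now-active version of init_leaderboard_mib_subgraph, here is a minimal, self-contained sketch of how the flat default_selection list is assembled. The TasksMib_Subgraph enum below is a hypothetical stand-in for the one imported from src.about (only its .value.benchmark and .value.models attributes are assumed from the diff), the benchmark and model names are illustrative, and the DataFrame is mocked as a plain list of column names.

from dataclasses import dataclass
from enum import Enum


@dataclass
class TaskMIB:
    benchmark: str
    models: list


class TasksMib_Subgraph(Enum):
    # Hypothetical members; the real enum lives in src.about and may differ.
    task0 = TaskMIB("ioi", ["gpt2", "qwen2_5"])
    task1 = TaskMIB("mcqa", ["qwen2_5", "llama3"])


# Mock of dataframe.columns: only these leaderboard columns "exist" here.
dataframe_columns = ["Method", "ioi_gpt2", "ioi_qwen2_5", "mcqa_qwen2_5", "mcqa_llama3"]

# Group columns by benchmark, keeping only columns that actually exist.
benchmark_groups = []
for task in TasksMib_Subgraph:
    benchmark = task.value.benchmark
    cols = [f"{benchmark}_{m}" for m in task.value.models
            if f"{benchmark}_{m}" in dataframe_columns]
    if cols:
        benchmark_groups.append(cols)

# Group the same columns by model across benchmarks (sorted for determinism).
all_models = sorted({m for task in TasksMib_Subgraph for m in task.value.models})
model_groups = []
for model in all_models:
    cols = [f"{t.value.benchmark}_{model}" for t in TasksMib_Subgraph
            if model in t.value.models
            and f"{t.value.benchmark}_{model}" in dataframe_columns]
    if cols:
        model_groups.append(cols)

# Flatten for SelectColumns(default_selection=...); each existing column shows
# up twice, once via its benchmark group and once via its model group.
all_columns = [col for group in benchmark_groups + model_groups for col in group]
print(all_columns)

Because every valid column lands in both a benchmark group and a model group, the flattened list handed to SelectColumns contains each column name twice; that duplication is a property of the committed code itself and is preserved in the sketch.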