jasonshaoshun committed · Commit 49218db · 1 Parent(s): 53c242a
debug
app.py CHANGED
@@ -128,6 +128,66 @@ from src.about import TasksMib_Subgraph
 
 
 
+# def init_leaderboard_mib_subgraph(dataframe, track):
+#     """Initialize the subgraph leaderboard with grouped column selection by benchmark."""
+#     if dataframe is None or dataframe.empty:
+#         raise ValueError("Leaderboard DataFrame is empty or None.")
+
+#     print("\nDebugging DataFrame columns:", dataframe.columns.tolist())
+
+#     # Create groups of columns by benchmark
+#     benchmark_groups = []
+
+#     # For each benchmark in our TasksMib_Subgraph enum...
+#     for task in TasksMib_Subgraph:
+#         benchmark = task.value.benchmark
+#         # Get all valid columns for this benchmark's models
+#         benchmark_cols = [
+#             f"{benchmark}_{model}"
+#             for model in task.value.models
+#             if f"{benchmark}_{model}" in dataframe.columns
+#         ]
+#         if benchmark_cols:  # Only add if we have valid columns
+#             benchmark_groups.append(benchmark_cols)
+#             print(f"\nBenchmark group for {benchmark}:", benchmark_cols)
+
+#     # Create model groups as well
+#     model_groups = []
+#     all_models = list(set(model for task in TasksMib_Subgraph for model in task.value.models))
+
+#     # For each unique model...
+#     for model in all_models:
+#         # Get all valid columns for this model across benchmarks
+#         model_cols = [
+#             f"{task.value.benchmark}_{model}"
+#             for task in TasksMib_Subgraph
+#             if model in task.value.models
+#             and f"{task.value.benchmark}_{model}" in dataframe.columns
+#         ]
+#         if model_cols:  # Only add if we have valid columns
+#             model_groups.append(model_cols)
+#             print(f"\nModel group for {model}:", model_cols)
+
+#     # Combine all groups
+#     all_groups = benchmark_groups + model_groups
+
+#     # Flatten groups for default selection (show everything initially)
+#     all_columns = [col for group in all_groups for col in group]
+#     print("\nAll available columns:", all_columns)
+
+#     return Leaderboard(
+#         value=dataframe,
+#         datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
+#         select_columns=SelectColumns(
+#             default_selection=all_columns,  # Show all columns initially
+#             label="Select Results:"
+#         ),
+#         search_columns=["Method"],
+#         hide_columns=[],
+#         interactive=False,
+#     )
+
+
 def init_leaderboard_mib_subgraph(dataframe, track):
     """Initialize the subgraph leaderboard with grouped column selection by benchmark."""
     if dataframe is None or dataframe.empty:
@@ -135,57 +195,69 @@ def init_leaderboard_mib_subgraph(dataframe, track):
 
     print("\nDebugging DataFrame columns:", dataframe.columns.tolist())
 
+    # First, create a mapping between field names and display names
+    field_to_display = {}
+    for field in fields(AutoEvalColumn_mib_subgraph):
+        if hasattr(field, 'name') and hasattr(field, 'type'):
+            field_to_display[field.name] = field.type
+
     # Create groups of columns by benchmark
     benchmark_groups = []
-
-    # For each benchmark in our TasksMib_Subgraph enum...
     for task in TasksMib_Subgraph:
         benchmark = task.value.benchmark
-        # Get all valid columns for this benchmark's models
+        # Get all valid columns for this benchmark's models, using display names
         benchmark_cols = [
-            f"{benchmark}_{model}"
+            f"{benchmark}({model})"  # Use display name format
             for model in task.value.models
-            if f"{benchmark}_{model}" in dataframe.columns
+            if f"{benchmark}_{model}" in dataframe.columns  # Still check using field name
         ]
-        if benchmark_cols:  # Only add if we have valid columns
+        if benchmark_cols:
             benchmark_groups.append(benchmark_cols)
             print(f"\nBenchmark group for {benchmark}:", benchmark_cols)
 
-    # Create model groups as well
+    # Create model groups with display names
     model_groups = []
     all_models = list(set(model for task in TasksMib_Subgraph for model in task.value.models))
 
-    # For each unique model...
     for model in all_models:
-        # Get all valid columns for this model across benchmarks
         model_cols = [
-            f"{task.value.benchmark}_{model}"
+            f"{task.value.benchmark}({model})"  # Use display name format
             for task in TasksMib_Subgraph
             if model in task.value.models
             and f"{task.value.benchmark}_{model}" in dataframe.columns
         ]
-        if model_cols:  # Only add if we have valid columns
+        if model_cols:
             model_groups.append(model_cols)
             print(f"\nModel group for {model}:", model_cols)
 
-    # Combine all groups
+    # Combine and flatten groups
     all_groups = benchmark_groups + model_groups
-
-    # Flatten groups for default selection (show everything initially)
     all_columns = [col for group in all_groups for col in group]
-    print("\nAll available columns:", all_columns)
+
+    # Important: We need to rename the DataFrame columns to match our display names
+    display_name_mapping = {
+        f"{task.value.benchmark}_{model}": f"{task.value.benchmark}({model})"
+        for task in TasksMib_Subgraph
+        for model in task.value.models
+        if f"{task.value.benchmark}_{model}" in dataframe.columns
+    }
+
+    # Create a copy of the DataFrame with renamed columns
+    display_df = dataframe.rename(columns=display_name_mapping)
 
     return Leaderboard(
-        value=dataframe,
+        value=display_df,  # Use the DataFrame with display names
         datatype=[c.type for c in fields(AutoEvalColumn_mib_subgraph)],
         select_columns=SelectColumns(
-            default_selection=all_columns,  # Show all columns initially
+            default_selection=all_columns,
            label="Select Results:"
         ),
         search_columns=["Method"],
         hide_columns=[],
         interactive=False,
     )
+
+
 
 # def init_leaderboard_mib_subgraph(dataframe, track):
 # """Initialize the subgraph leaderboard with group-based column selection."""