ycy
committed · 7dad3b1
1 Parent(s): 979e0a3
test
src/populate.py CHANGED (+2 -39)
@@ -12,51 +12,14 @@ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchm
     """Creates a dataframe from all the individual experiment results"""
     raw_data = get_raw_eval_results(results_path, requests_path)
 
-    # raw_data example
-    """raw_data = [
-        EvalResult(
-            model_name="org1/model1",
-            model_dtype="float32",
-            model_sha="commit_hash1",
-            results={
-                "task1": {"metric1": 0.85, "metric2": 0.90},
-                "task2": {"metric1": 0.75, "metric2": 0.80}
-            },
-            model_type="Pretrained",
-            weight_type="Original",
-            license="MIT",
-            likes=100,
-            params=123456789,
-            submitted_time="2025-02-28T12:34:56Z",
-            status="FINISHED",
-            precision="float32"
-        ),
-        EvalResult(
-            model_name="org2/model2",
-            model_dtype="float32",
-            model_sha="commit_hash2",
-            results={
-                "task1": {"metric1": 0.88, "metric2": 0.92},
-                "task2": {"metric1": 0.78, "metric2": 0.82}
-            },
-            model_type="Fine-tuned",
-            weight_type="Adapter",
-            license="Apache-2.0",
-            likes=200,
-            params=987654321,
-            submitted_time="2025-02-28T12:34:56Z",
-            status="FINISHED",
-            precision="float32"
-        )
-    ]
-    """
     all_data_json = [v.to_dict() for v in raw_data]
 
     df = pd.DataFrame.from_records(all_data_json)
 
     df = df.sort_values(by=[AutoEvalColumn.task0.name], ascending=False)
     df = df[cols].round(decimals=2)
-
+    print(df)
+    assert 0
     # filter out if any of the benchmarks have not been produced
     df = df[has_no_nan_values(df, benchmark_cols)]
     return df