import json
import os
import numpy as np
import pandas as pd
from src.display.formatting import has_no_nan_values, make_clickable_model, model_hyperlink
from src.display.utils import AutoEvalColumn, EvalQueueColumn, HEADER_MAP
from src.leaderboard.read_evals import get_raw_eval_results


def add_model_hyperlinks(row):
    """Return the model name wrapped in a hyperlink when a model URL is available."""
    if row["Model URL"] is None or row["Model URL"] == "":
        return row["Model"]
    return model_hyperlink(row["Model URL"], row["Model"])


def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> tuple[list, pd.DataFrame]:
    """Build the leaderboard dataframe from raw evaluation results."""
    raw_data = get_raw_eval_results(results_path, requests_path)
    #all_data_json = [v.to_dict() for v in raw_data]
    df = pd.DataFrame.from_records(raw_data)
    #df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
    df = df.rename(columns=HEADER_MAP)
    df[df.select_dtypes(include=['number']).columns] *= 100  # convert scores to percentages

    # Aggregate per-category averages
    df["Grammar (Avg.)"] = df[["AGREE"]].mean(axis=1)
    df["Knowledge (Avg.)"] = df[["ARC-Challenge", "ARC-Easy", "MMLU", "TruthfulQA"]].mean(axis=1)
    df["Reasoning (Avg.)"] = df[["ANLI", "Belebele", "CTKFacts", "SQAD"]].mean(axis=1)
    df["Math (Avg.)"] = df[["GSM8K", "Klokanek"]].mean(axis=1)
    df["Classification (Avg.)"] = df[["Czech News", "Facebook Comments", "Mall Reviews", "Subjectivity"]].mean(axis=1)

    df["_"] = ""  # workaround: Gradio does not render the last column, so pad with an empty one

    df = df[cols].round(decimals=2)
    df = df.replace(r'\s+', np.nan, regex=True)  # convert whitespace-only entries to NaN

    # Filter out models for which any of the benchmarks has not been produced
    df = df[has_no_nan_values(df, benchmark_cols)]
    df['Model'] = df.apply(add_model_hyperlinks, axis=1)
    return raw_data, df


def get_evaluation_queue_df(save_path: str, cols: list) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Collect evaluation requests from `save_path` and split them into finished, running and pending queues."""
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    for entry in entries:
        if ".json" in entry:
            file_path = os.path.join(save_path, entry)
            with open(file_path) as fp:
                data = json.load(fp)

            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
            data[EvalQueueColumn.revision.name] = data.get("revision", "main")

            all_evals.append(data)
        elif ".md" not in entry:
            # this is a folder of per-model request files
            sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if not e.startswith(".")]
            for sub_entry in sub_entries:
                file_path = os.path.join(save_path, entry, sub_entry)
                with open(file_path) as fp:
                    data = json.load(fp)

                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
                all_evals.append(data)

    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]

    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    return df_finished[cols], df_running[cols], df_pending[cols]
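

# Minimal usage sketch (not part of the original module). It assumes the app exposes column
# lists such as COLS, BENCHMARK_COLS and EVAL_COLS in src.display.utils and keeps its results
# and requests in local "eval-results" / "eval-queue" folders; adjust the names and paths to
# the actual configuration of the Space before relying on it.
if __name__ == "__main__":
    from src.display.utils import COLS, BENCHMARK_COLS, EVAL_COLS  # assumed helper constants

    raw_data, leaderboard_df = get_leaderboard_df("eval-results", "eval-queue", COLS, BENCHMARK_COLS)
    print(leaderboard_df.head())

    finished_df, running_df, pending_df = get_evaluation_queue_df("eval-queue", EVAL_COLS)
    print(f"{len(finished_df)} finished, {len(running_df)} running, {len(pending_df)} pending")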