import json
import os

import pandas as pd

from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn
from src.leaderboard.read_evals import get_raw_eval_results
from src.about import Tasks


def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
    """Creates a dataframe from all the individual experiment results"""
    raw_data = get_raw_eval_results(results_path, requests_path)
    all_data_json = [v.to_dict() for v in raw_data]

    df = pd.DataFrame.from_records(all_data_json)

    # Guard against an empty results set: an empty frame has none of the
    # benchmark columns, so sorting, rounding, and filtering would raise.
    if len(df):
        # Sort by accuracy first, then F1 score
        df = df.sort_values(by=[Tasks.task0.value.col_name, Tasks.task1.value.col_name], ascending=False)
        df[benchmark_cols] = df[benchmark_cols].round(decimals=3)
        # Filter out incomplete submissions (any NaN in a benchmark column)
        df = df[has_no_nan_values(df, benchmark_cols)]

    return df
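

# Illustrative usage sketch, not part of the original module: it shows one
# way get_leaderboard_df might be invoked. The paths below are hypothetical
# placeholders; in a real leaderboard app they would come from the app's
# configuration rather than from literals, and "model" is an assumed name
# for the model column.
if __name__ == "__main__":
    # Benchmark columns are derived from the Tasks enum defined in src.about.
    benchmark_cols = [task.value.col_name for task in Tasks]
    cols = ["model"] + benchmark_cols  # assumed display columns

    leaderboard_df = get_leaderboard_df(
        results_path="./eval-results",  # hypothetical local results directory
        requests_path="./eval-queue",   # hypothetical local requests directory
        cols=cols,
        benchmark_cols=benchmark_cols,
    )
    print(leaderboard_df.head())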