update trust safety metrics
- src/display/utils.py +5 -2
- src/populate.py +19 -56
src/display/utils.py
CHANGED

@@ -77,8 +77,11 @@ ts_eval_column_dict = []
 # Init
 ts_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model Name", "markdown", True, never_hidden=True)])
 ts_eval_column_dict.append(["model_provider", ColumnContent, ColumnContent("LLM Provider", "markdown", True)])
-ts_eval_column_dict.append(["
-ts_eval_column_dict.append(["safety", ColumnContent, ColumnContent("Safety", "markdown",
+ts_eval_column_dict.append(["ts", ColumnContent, ColumnContent("Trust & Safety", "markdown", True)])
+ts_eval_column_dict.append(["safety", ColumnContent, ColumnContent("Safety", "markdown", False)])
+ts_eval_column_dict.append(["privacy", ColumnContent, ColumnContent("Privacy", "markdown", False)])
+ts_eval_column_dict.append(["truthfulness", ColumnContent, ColumnContent("Truthfulness", "markdown", False)])
+
 TSEvalColumn = make_dataclass("TSEvalColumn", ts_eval_column_dict, frozen=True)
 
 
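For orientation, a minimal sketch of how this column registry becomes the TSEvalColumn class. The ColumnContent definition below is an assumption (it is not shown in this diff), modeled on the common leaderboard-template layout with fields name, type, displayed_by_default, hidden, never_hidden; only the append pattern and the make_dataclass call come from the file above.

from dataclasses import dataclass, make_dataclass

@dataclass(frozen=True)
class ColumnContent:
    # Assumed field layout, matching calls like
    # ColumnContent("Model Name", "markdown", True, never_hidden=True)
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False

ts_eval_column_dict = []
ts_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model Name", "markdown", True, never_hidden=True)])
ts_eval_column_dict.append(["ts", ColumnContent, ColumnContent("Trust & Safety", "markdown", True)])

# Each [field_name, field_type, default] entry becomes a field of the generated
# frozen dataclass; the ColumnContent instances are stored as class-level defaults.
TSEvalColumn = make_dataclass("TSEvalColumn", ts_eval_column_dict, frozen=True)

print(TSEvalColumn.ts.name)    # Trust & Safety
print(TSEvalColumn.ts.hidden)  # False

The rest of the leaderboard code can then refer to columns by attribute (e.g. TSEvalColumn.ts.name) instead of hard-coding the display strings.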
src/populate.py
CHANGED

@@ -2,10 +2,6 @@ import os
 
 import pandas as pd
 
-# from src.display.formatting import has_no_nan_values, make_clickable_model
-# from src.display.utils import AutoEvalColumn, EvalQueueColumn
-# from src.leaderboard.read_evals import get_raw_eval_results
-
 
 def get_leaderboard_df_crm(
     crm_results_path: str, accuracy_cols: list, cost_cols: list
@@ -18,9 +14,6 @@ def get_leaderboard_df_crm(
     # leaderboard_accuracy_df = leaderboard_accuracy_df.sort_values(
     #     by=[AutoEvalColumn.accuracy_metric_average.name], ascending=False
     # )
-    # print(leaderboard_accuracy_df)
-    # print(leaderboard_accuracy_df.columns)
-    # print(leaderboard_accuracy_df["Model Name"].nunique())
     leaderboard_accuracy_df = leaderboard_accuracy_df[accuracy_cols].round(decimals=2)
 
     ref_df = leaderboard_accuracy_df[["Model Name", "LLM Provider"]].drop_duplicates()
@@ -34,54 +27,24 @@ def get_leaderboard_df_crm(
     leaderboard_ts_df = pd.read_csv(os.path.join(crm_results_path, "hf_leaderboard_ts.csv"))
     leaderboard_ts_df = leaderboard_ts_df[~leaderboard_ts_df["Model Name"].isin(sf_finetuned_models)]
     leaderboard_ts_df = leaderboard_ts_df.join(ref_df.set_index("Model Name"), on="Model Name")
+    privacy_cols = leaderboard_ts_df[
+        [
+            "Privacy Zero-Shot Match Avoidance",
+            "Privacy Zero-Shot Reveal Avoidance",
+            "Privacy Five-Shot Match Avoidance",
+            "Privacy Five-Shot Reveal Avoidance",
+        ]
+    ].apply(lambda x: x.str.rstrip("%").astype("float") / 100.0, axis=1)
+
+    leaderboard_ts_df["Privacy"] = privacy_cols.mean(axis=1).transform(lambda x: "{:,.2%}".format(x))
+
+    ts_cols = leaderboard_ts_df[
+        [
+            "Safety",
+            "Privacy",
+            "Truthfulness",
+        ]
+    ].apply(lambda x: x.str.rstrip("%").astype("float") / 100.0, axis=1)
+    leaderboard_ts_df["Trust & Safety"] = ts_cols.mean(axis=1).transform(lambda x: "{:,.2%}".format(x))
 
     return leaderboard_accuracy_df, leaderboard_cost_df, leaderboard_ts_df
-
-
-# def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
-#     """Creates a dataframe from all the individual experiment results"""
-#     raw_data = get_raw_eval_results(results_path, requests_path)
-#     all_data_json = [v.to_dict() for v in raw_data]
-
-#     df = pd.DataFrame.from_records(all_data_json)
-#     df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
-#     df = df[cols].round(decimals=2)
-
-#     # filter out if any of the benchmarks have not been produced
-#     df = df[has_no_nan_values(df, benchmark_cols)]
-#     return raw_data, df
-
-# def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
-#     """Creates the different dataframes for the evaluation queues requestes"""
-#     entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
-#     all_evals = []
-
-#     for entry in entries:
-#         if ".json" in entry:
-#             file_path = os.path.join(save_path, entry)
-#             with open(file_path) as fp:
-#                 data = json.load(fp)
-
-#             data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
-#             data[EvalQueueColumn.revision.name] = data.get("revision", "main")
-
-#             all_evals.append(data)
-#         elif ".md" not in entry:
-#             # this is a folder
-#             sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if not e.startswith(".")]
-#             for sub_entry in sub_entries:
-#                 file_path = os.path.join(save_path, entry, sub_entry)
-#                 with open(file_path) as fp:
-#                     data = json.load(fp)
-
-#                 data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
-#                 data[EvalQueueColumn.revision.name] = data.get("revision", "main")
-#                 all_evals.append(data)
-
-#     pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
-#     running_list = [e for e in all_evals if e["status"] == "RUNNING"]
-#     finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
-#     df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
-#     df_running = pd.DataFrame.from_records(running_list, columns=cols)
-#     df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
-#     return df_finished[cols], df_running[cols], df_pending[cols]
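For reference, here is a minimal, self-contained sketch of what the added aggregation does, run on made-up data (the column names come from the diff; the model names and percentage values are invented). The four privacy sub-metrics are parsed from percentage strings into fractions and averaged per row into "Privacy"; then "Safety", "Privacy", and "Truthfulness" are averaged the same way into "Trust & Safety" and re-formatted as percentage strings.

import pandas as pd

# Toy stand-in for hf_leaderboard_ts.csv after the join step above (values invented).
leaderboard_ts_df = pd.DataFrame(
    {
        "Model Name": ["model-a", "model-b"],
        "Safety": ["90.00%", "80.00%"],
        "Truthfulness": ["70.00%", "60.00%"],
        "Privacy Zero-Shot Match Avoidance": ["100.00%", "50.00%"],
        "Privacy Zero-Shot Reveal Avoidance": ["90.00%", "50.00%"],
        "Privacy Five-Shot Match Avoidance": ["80.00%", "50.00%"],
        "Privacy Five-Shot Reveal Avoidance": ["70.00%", "50.00%"],
    }
)

# Strip "%" from each privacy sub-metric, convert to a fraction, and average per row.
privacy_cols = leaderboard_ts_df[
    [
        "Privacy Zero-Shot Match Avoidance",
        "Privacy Zero-Shot Reveal Avoidance",
        "Privacy Five-Shot Match Avoidance",
        "Privacy Five-Shot Reveal Avoidance",
    ]
].apply(lambda x: x.str.rstrip("%").astype("float") / 100.0, axis=1)
leaderboard_ts_df["Privacy"] = privacy_cols.mean(axis=1).transform(lambda x: "{:,.2%}".format(x))

# Average Safety, Privacy, and Truthfulness into the headline "Trust & Safety" column.
ts_cols = leaderboard_ts_df[["Safety", "Privacy", "Truthfulness"]].apply(
    lambda x: x.str.rstrip("%").astype("float") / 100.0, axis=1
)
leaderboard_ts_df["Trust & Safety"] = ts_cols.mean(axis=1).transform(lambda x: "{:,.2%}".format(x))

print(leaderboard_ts_df[["Model Name", "Privacy", "Trust & Safety"]])
#   Model Name Privacy Trust & Safety
# 0    model-a  85.00%         81.67%
# 1    model-b  50.00%         63.33%

One consequence of the order of operations: "Privacy" is written back as a formatted string before the second apply, so the "Trust & Safety" average is computed from values already rounded to two decimal places.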