import glob
import json
import os
from dataclasses import dataclass

import numpy as np

from src.display.utils import Tasks
@dataclass
class EvalResult:
    """Represents one full evaluation. Built from a combination of the result
    and request files for a given run.
    """

    model_name: str
    student_id: str
    results: dict
    eval_name: str = ""  # composite save name, set in get_raw_eval_results
    date: str = ""  # submission date, filled in from the request file
    @classmethod
    def init_from_json_file(cls, json_filepath):
        """Inits the result from the given model result file"""
        with open(json_filepath) as fp:
            data = json.load(fp)

        config = data.get("config", {})

        # Extract results available in this file (some results are split across several files)
        results = {}
        for task in Tasks:
            task = task.value

            # We average all scores of a given metric (not all metrics are present in all files)
            accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
            if accs.size == 0 or any(acc is None for acc in accs):
                continue
            results[task.col_name] = accs.mean()

        return cls(
            model_name=config.get("model_name", None),
            student_id=config.get("student_id", None),
            results=results,
        )
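    # A minimal sketch of the result-file JSON this parser assumes; the
    # "config" and "results" keys and the per-task benchmark/metric lookups
    # come from the reads above, while the concrete names are hypothetical:
    #
    # {
    #     "config": {"model_name": "my-model", "student_id": "s123456"},
    #     "results": {
    #         "some_benchmark": {"some_metric": 0.42}
    #     }
    # }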
    def update_with_request_file(self, requests_path, model_name, student_id):
        """Finds the relevant request file for the current model and updates info with it"""
        request_file = get_request_file_for_model(requests_path, model_name, student_id)

        try:
            with open(request_file, "r") as f:
                request = json.load(f)
            self.date = request.get("submitted_time", "")
        except (OSError, json.JSONDecodeError):
            print(f"Could not find or parse request file for {student_id}_{model_name}")
    def to_dict(self):
        """Converts the Eval Result to a dict compatible with our dataframe display"""
        data_dict = {
            "eval_name": self.eval_name,  # not a column, just a save name
            "Model Name": self.model_name,
        }

        # Add task-specific metrics; direct indexing (rather than .get) lets the
        # completeness check in get_raw_eval_results catch partial results via KeyError
        for task in Tasks:
            data_dict[task.value.col_name] = self.results[task.value.col_name]

        # Add student ID and submission date
        data_dict["Student ID"] = self.student_id
        data_dict["Submission Date"] = self.date

        return data_dict
def get_request_file_for_model(requests_path, model_name, student_id):
    """Selects the correct request file for a given model."""
    pattern = os.path.join(
        requests_path,
        student_id,
        f"request_{student_id}_{model_name}*.json",
    )
    request_files = glob.glob(pattern)

    # Select the latest request file based on the modification date
    request_file = ""
    request_files = sorted(request_files, key=os.path.getmtime, reverse=True)
    if len(request_files) > 0:
        request_file = request_files[0]
    return request_file
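# A sketch of the on-disk layout the path handling above assumes; the request
# file pattern comes from the glob in get_request_file_for_model, the result
# file pattern from the filters in get_raw_eval_results below, and the
# concrete student IDs, model names, and timestamps are hypothetical:
#
#   <requests_path>/
#       s123456/
#           request_s123456_my-model_2024-01-01T00-00-00.json
#   <results_path>/
#       s123456/
#           result_my-model_2024-01-01T00-00-00.json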
def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
    """From the path of the results folder root, extracts all needed info for results"""
    model_result_filepaths = []

    for root, _, files in os.walk(results_path):
        # Keep only JSON result files
        files = [f for f in files if f.endswith(".json") and f.startswith("result")]

        # Sort lexicographically by the timestamp embedded in the file name,
        # so that later results overwrite earlier ones in the merge below
        files.sort(key=lambda x: x.removesuffix(".json").removeprefix("result")[:-7])

        for file in files:
            model_result_filepaths.append(os.path.join(root, file))
    eval_results = {}
    for model_result_filepath in model_result_filepaths:
        # Creation of result
        eval_result = EvalResult.init_from_json_file(model_result_filepath)
        eval_result.update_with_request_file(requests_path, eval_result.model_name, eval_result.student_id)

        # Store results of the same eval together
        eval_name = f"{eval_result.student_id}_{eval_result.model_name}"
        eval_result.eval_name = eval_name
        if eval_name in eval_results:
            eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
        else:
            eval_results[eval_name] = eval_result

    results = []
    for v in eval_results.values():
        try:
            v.to_dict()  # we test if the dict version is complete
            results.append(v)
        except KeyError:  # not all eval values present
            continue

    return results
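# A minimal usage sketch, assuming "results" and "requests" directories laid
# out as described above; the paths here are placeholders, not part of this module:
if __name__ == "__main__":
    raw_results = get_raw_eval_results("results", "requests")
    for result in raw_results:
        # Each surviving entry is complete and ready to become one dataframe row
        print(result.to_dict())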