import os
import threading
from datetime import datetime

import pandas as pd
from huggingface_hub import HfFileSystem

from utils import logger

fs = HfFileSystem()

IMPORTANT_MODELS = [
    "auto",
    "bert",  # old but dominant (encoder only)
    "gpt2",  # old (decoder)
    "t5",  # old (encoder-decoder)
    "modernbert",  # (encoder only)
    "vit",  # old (vision)
    "clip",  # old but dominant (vision)
    "detr",  # object detection, segmentation (vision)
    "table-transformer",  # object detection (vision) - maybe just detr?
    "got_ocr2",  # ocr (vision)
    "whisper",  # old but dominant (audio)
    "wav2vec2",  # old (audio)
    "llama",  # new and dominant (meta)
    "gemma3",  # new (google)
    "qwen2",  # new (Alibaba)
    "mistral3",  # new (Mistral)
    "qwen2_5_vl",  # new (vision)
    "llava",  # many models derived from it (vision)
    "smolvlm",  # new (video)
    "internvl",  # new (video)
    "gemma3n",  # new (omnimodal models)
    "qwen2_5_omni",  # new (omnimodal models)
]
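

# Illustrative shape of one entry in the model_results.json reports, inferred from
# the accesses in read_one_dataframe and get_distant_data below (an assumption:
# the real reports may carry additional fields):
# "models_bert": {
#     "success": ...,
#     "failures": {"single": [...], "multi": [...]},
#     "job_link": {...},
# }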
def read_one_dataframe(json_path: str, device_label: str) -> pd.DataFrame:
    df = pd.read_json(json_path, orient="index")
    df.index.name = "model_name"
    # Flatten the per-setting failure counts (single- vs. multi-gpu) into columns
    df[f"failed_multi_no_{device_label}"] = df["failures"].apply(lambda x: len(x["multi"]) if "multi" in x else 0)
    df[f"failed_single_no_{device_label}"] = df["failures"].apply(lambda x: len(x["single"]) if "single" in x else 0)
    return df


def get_distant_data() -> pd.DataFrame:
    # Retrieve the latest AMD report
    amd_src = "hf://datasets/optimum-amd/transformers_daily_ci/**/runs/**/ci_results_run_models_gpu/model_results.json"
    files_amd = sorted(fs.glob(amd_src), reverse=True)
    df_amd = read_one_dataframe(f"hf://{files_amd[0]}", "amd")
    # Retrieve the latest NVIDIA report
    nvidia_src = "hf://datasets/hf-internal-testing/transformers_daily_ci/**/ci_results_run_models_gpu/model_results.json"
    files_nvidia = sorted(fs.glob(nvidia_src), reverse=True)
    # removeprefix rather than lstrip: lstrip strips a *character set* and could eat
    # leading characters of the date-based subpath
    nvidia_path = files_nvidia[0].removeprefix("datasets/hf-internal-testing/transformers_daily_ci/")
    nvidia_path = "https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/raw/main/" + nvidia_path
    df_nvidia = read_one_dataframe(nvidia_path, "nvidia")
    # Join both dataframes and keep only the relevant columns
    joined = df_amd.join(df_nvidia, rsuffix="_nvidia", lsuffix="_amd", how="outer")
    joined = joined[
        [
            "success_amd",
            "success_nvidia",
            "failed_multi_no_amd",
            "failed_multi_no_nvidia",
            "failed_single_no_amd",
            "failed_single_no_nvidia",
            "failures_amd",
            "failures_nvidia",
            "job_link_amd",
            "job_link_nvidia",
        ]
    ]
    joined.index = joined.index.str.replace("^models_", "", regex=True)
    # Filter out all but the important models
    important_models_lower = [model.lower() for model in IMPORTANT_MODELS]
    filtered_joined = joined[joined.index.str.lower().isin(important_models_lower)]
    return filtered_joined
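

# sample_data.csv ships alongside this file for offline tinkering; it is assumed
# to mirror the columns selected in get_distant_data, i.e. a header along the lines of:
# model_name,success_amd,success_nvidia,failed_multi_no_amd,...,job_link_nvidia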
def get_sample_data() -> pd.DataFrame:
    path = os.path.join(os.path.dirname(__file__), "sample_data.csv")
    df = pd.read_csv(path)
    df = df.set_index("model_name")
    return df


class CIResults:
    def __init__(self):
        self.df = pd.DataFrame()
        self.available_models = []
        self.last_update_time = ""
    def load_data(self) -> None:
        """Load data from the data source."""
        # Try loading the distant data, and fall back on sample data for local tinkering
        try:
            logger.info("Loading distant data...")
            new_df = get_distant_data()
        except Exception as e:
            logger.error(f"Loading data failed: {e}")
            logger.warning("Falling back on sample data.")
            new_df = get_sample_data()
        # Update attributes
        self.df = new_df
        self.available_models = new_df.index.tolist()
        self.last_update_time = datetime.now().strftime("%H:%M")
        # Log the load status
        logger.info(f"Data loaded successfully: {len(self.available_models)} models")
        logger.info(f"Models: {self.available_models[:5]}{'...' if len(self.available_models) > 5 else ''}")
    def schedule_data_reload(self):
        """Schedule a data reload every 15 minutes."""

        def reload_data():
            self.load_data()
            # Schedule the next reload in 15 minutes (900 seconds)
            timer = threading.Timer(900.0, reload_data)
            timer.daemon = True  # Dies when the main thread dies
            timer.start()
            logger.info("Next data reload scheduled in 15 minutes")

        # Start the first reload timer
        timer = threading.Timer(900.0, reload_data)
        timer.daemon = True
        timer.start()
        logger.info("Data auto-reload scheduled every 15 minutes")