import gradio as gr
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download
from functools import lru_cache
import logging
from src.about import CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT, EVALUATION_QUEUE_TEXT, INTRODUCTION_TEXT, \
LLM_BENCHMARKS_TEXT, TITLE
from src.tasks import TASK_DESCRIPTIONS, MEASURE_DESCRIPTION
from src.display.css_html_js import custom_css
from src.display.utils import BENCHMARK_COLS, COLS, EVAL_COLS, EVAL_TYPES, AutoEvalColumn, ModelType, fields, \
WeightType, Precision
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
from src.populate import get_evaluation_queue_df, get_leaderboard_df
from src.submission.submit import add_new_eval
import matplotlib.pyplot as plt
import re
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# EVALITA results, used as per-task baselines (dashed black lines in the task boxplot)
BASELINES = {
"TE": 71.00, "SA": 66.38, "HS": 80.88, "AT": 82.40, "WIC": 85.00,
"LS": 38.82, "SU": 38.91, "NER": 88.00, "REL": 62.99
}
# GPT-4o reference results for the generative tasks (dashed red lines in the task boxplot)
REFERENCES = {
"NER": 79.11, "REL": 63.32, "LS": 59.25, "SU": 33.04
}
TASK_METADATA_MULTIPLECHOICE = {
"TE": {"icon": "📊", "name": "Textual Entailment", "tooltip": ""},
"SA": {"icon": "😃", "name": "Sentiment Analysis", "tooltip": ""},
"HS": {"icon": "⚠️", "name": "Hate Speech", "tooltip": ""},
"AT": {"icon": "🏥", "name": "Admission Test", "tooltip": ""},
"WIC": {"icon": "🔤", "name": "Word in Context", "tooltip": ""},
"FAQ": {"icon": "❓", "name": "Frequently Asked Questions", "tooltip": ""}
}
TASK_METADATA_GENERATIVE = {
"LS": {"icon": "🔄", "name": "Lexical Substitution", "tooltip": ""},
"SU": {"icon": "📝", "name": "Summarization", "tooltip": ""},
"NER": {"icon": "🏷️", "name": "Named Entity Recognition", "tooltip": ""},
"REL": {"icon": "🔗", "name": "Relation Extraction", "tooltip": ""},
}
def theoretical_performance(df_hash):
"""
Theoretical performance of a model that scores the highest on every individual task
"""
# This is a placeholder - you'd need to pass the actual dataframe
# In practice, you'd compute this once and store it
#fields = ["TE", "SA", "HS", "AT", "WIC", "FAQ", "LS", "SU", "NER", "REL"]
return 75.0 # Placeholder value
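# A minimal sketch of how the value above could be computed from the real
# leaderboard dataframe (column names are assumed to match the task keys
# used elsewhere in this module):
#
#   task_cols = ["TE", "SA", "HS", "AT", "WIC", "FAQ", "LS", "SU", "NER", "REL"]
#   best_per_task = leaderboard_df[task_cols].max()   # best score on each task
#   theoretical = best_per_task.mean()                # average of the per-task maxima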
def scale_sizes(values, min_size=8, max_size=30):
"""Normalize sizes for scatter plot markers """
if not values:
return []
vmin, vmax = min(values), max(values)
if vmax == vmin:
return [(min_size + max_size) / 2] * len(values)
return [
min_size + (val - vmin) / (vmax - vmin) * (max_size - min_size)
for val in values
]
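# For example, scale_sizes([1, 7, 70]) returns roughly [8.0, 9.9, 30.0]:
# the smallest and largest models get the smallest and largest markers.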
def extract_model_name(model_string):
"""Extract model name from HTML string."""
match = re.search(r'>([^<]+)<', model_string)
return match.group(1) if match else model_string
def create_line_chart(dataframe):
"""Create left chart."""
def scale_sizes(values, min_size=8, max_size=30):
vmin, vmax = min(values), max(values)
return [
min_size + (val - vmin) / (vmax - vmin) * (max_size - min_size) if vmax > vmin
else (min_size + max_size) / 2
for val in values
]
fig = go.Figure()
    # Loop over the 5-shot and 0-shot configurations
for shot, color in [(True, "blue"), (False, "red")]:
df = dataframe[dataframe["IS_FS"] == shot]
x = df["#Params (B)"].tolist()
y = df["Avg. Comb. Perf. ⬆️"].tolist()
        labels = [
            extract_model_name(m) if isinstance(m, str) else str(m)
            for m in df["Model"].tolist()
        ]
        fig.add_trace(go.Scatter(
            x=x,
            y=y,
            mode="markers",
            name="5-Shot" if shot else "0-Shot",
            marker=dict(color=color, size=scale_sizes(x)),
            customdata=labels,
            hovertemplate="%{customdata}<br>#Params: %{x}<br>Performance: %{y}<extra></extra>"
        ))
    fig.add_annotation(
        text="Note: smaller models with 5-shot can outperform larger zero-shot models.",
        xref="paper", yref="paper", x=0.5, y=-0.3,
        showarrow=False, font=dict(size=11, color="gray"),
        align="center", xanchor="center"
    )
fig.update_xaxes(fixedrange=True, rangeslider_visible=False)
fig.update_yaxes(fixedrange=True)
return fig
# Create right chart
def create_boxplot_task(dataframe=None, baselines=None, references=None):
"""Create right chart"""
tasks = ["TE", "SA", "HS", "AT", "WIC", "FAQ", "LS", "SU", "NER", "REL"]
    # Default data if none is provided (useful for standalone testing)
if dataframe is None:
np.random.seed(42)
dataframe = pd.DataFrame({task: np.random.uniform(0.4, 0.9, 20) * 100 for task in tasks})
if baselines is None:
baselines = {task: np.random.randint(50, 70) for task in tasks}
if references is None:
references = {}
colors = ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd",
"#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf"]
fig = go.Figure()
for i, task in enumerate(tasks):
if task not in dataframe.columns:
continue
y_data = dataframe[task].dropna().tolist()
# Boxplot
fig.add_trace(go.Box(
y=y_data,
name=task,
marker=dict(color=colors[i]),
line=dict(color="black", width=2),
fillcolor=colors[i],
opacity=0.7,
hovertemplate=""+task+"
Accuracy: %{y:.2f}%
"
"models at EVALITA (dashed black line); in NER and REL they remain lower.
"
"Dashed red lines show GPT-4o reference results for generative tasks."
),
xref="paper", yref="paper",
x=0.5, y=-0.30,
showarrow=False,
font=dict(size=11, color="gray"),
align="center"
)
fig.update_yaxes(range=[0, 100], fixedrange=True)
fig.update_xaxes(fixedrange=True)
return fig
def create_medal_assignments(sorted_df):
"""Function for medal assignment logic"""
medals = {
'large_fs': False, 'medium_fs': False, 'small_fs': False,
'large_0shot': False, 'medium_0shot': False, 'small_0shot': False
}
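    # The dataframe arrives sorted by performance (see init_leaderboard /
    # update_task_leaderboard), so the first row seen in each (size, shot)
    # bucket is the best one: it gets a 🏆 (few-shot) or 🎖️ (zero-shot),
    # and the corresponding flag prevents any later model in that bucket
    # from being decorated.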
new_model_column = []
for _, row in sorted_df.iterrows():
model_name = row['Model']
size = row["Size"]
is_fs = row['IS_FS']
        if is_fs:  # few-shot (5-shot)
if size == "🔵🔵🔵" and not medals['large_fs']:
model_name = f"{model_name} 🔵🔵🔵🏆"
medals['large_fs'] = True
elif size == "🔵🔵" and not medals['medium_fs']:
model_name = f"{model_name} 🔵🔵🏆"
medals['medium_fs'] = True
elif size == "🔵" and not medals['small_fs']:
model_name = f"{model_name} 🔵🏆"
medals['small_fs'] = True
else: # 0-Shot
if size == "🔵🔵🔵" and not medals['large_0shot']:
model_name = f"{model_name} 🔵🔵🔵🎖️"
medals['large_0shot'] = True
elif size == "🔵🔵" and not medals['medium_0shot']:
model_name = f"{model_name} 🔵🔵🎖️"
medals['medium_0shot'] = True
elif size == "🔵" and not medals['small_0shot']:
model_name = f"{model_name} 🔵🎖️"
medals['small_0shot'] = True
new_model_column.append(model_name)
return new_model_column
def create_leaderboard_base(sorted_dataframe, field_list, hidden_columns):
"""Base leaderboard creation with common parameters. """
return Leaderboard(
value=sorted_dataframe,
datatype=[c.type for c in field_list],
search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
hide_columns=hidden_columns,
filter_columns=[
ColumnFilter(AutoEvalColumn.fewshot_symbol.name, type="checkboxgroup", label="N-Shot Learning (FS)"),
ColumnFilter(AutoEvalColumn.params.name, type="slider", min=0, max=100, default=[0, 100],
label="Select the number of parameters (B)"),
],
bool_checkboxgroup_label="Evaluation Mode",
interactive=False,
)
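# init_leaderboard() below delegates to this helper; update_task_leaderboard()
# re-creates the Leaderboard directly so it can append an extra int to the
# datatype list.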
def init_leaderboard(dataframe, default_selection=None, hidden_columns=None):
"""Leaderboard initialization. """
if dataframe is None or dataframe.empty:
raise ValueError("Leaderboard DataFrame is empty or None.")
# Sort and reset index
sorted_dataframe = dataframe.sort_values(by="Avg. Comb. Perf. ⬆️", ascending=False).reset_index(drop=True)
sorted_dataframe["Rank"] = sorted_dataframe.index + 1
# Apply medal assignments
sorted_dataframe["Model"] = create_medal_assignments(sorted_dataframe)
field_list = fields(AutoEvalColumn)
return create_leaderboard_base(sorted_dataframe, field_list, hidden_columns)
def update_task_leaderboard(dataframe, default_selection=None, hidden_columns=None):
""" Task-specific leaderboard update."""
if dataframe is None or dataframe.empty:
raise ValueError("Leaderboard DataFrame is empty or None.")
# Sort and reset index
sorted_dataframe = dataframe.sort_values(by="Combined Performance", ascending=False).reset_index(drop=True)
sorted_dataframe["Rank"] = sorted_dataframe.index + 1
# Apply medal assignments
sorted_dataframe["Model"] = create_medal_assignments(sorted_dataframe)
field_list = fields(AutoEvalColumn)
return Leaderboard(
value=sorted_dataframe,
datatype=[c.type for c in field_list] + [int],
search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
hide_columns=hidden_columns,
filter_columns=[
ColumnFilter(AutoEvalColumn.fewshot_symbol.name, type="checkboxgroup", label="N-Shot Learning (FS)"),
ColumnFilter(AutoEvalColumn.params.name, type="slider", min=0, max=100, default=[0, 100],
label="Select the number of parameters (B)"),
],
bool_checkboxgroup_label="Evaluation Mode",
interactive=False
)
def download_snapshot(repo, local_dir, max_retries=3):
"""Snapshot download with retry logic."""
for attempt in range(max_retries):
try:
logger.info(f"Downloading from {repo} to {local_dir} (attempt {attempt + 1}/{max_retries})")
snapshot_download(
repo_id=repo,
local_dir=local_dir,
repo_type="dataset",
tqdm_class=None,
etag_timeout=30,
token=TOKEN
)
return True
except Exception as e:
logger.error(f"Error downloading {repo} (attempt {attempt + 1}): {e}")
if attempt == max_retries - 1:
logger.error(f"Failed to download {repo} after {max_retries} attempts")
return False
return False
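# Typical usage with the repos/paths imported from src.envs (the exact call
# sites are an assumption, not shown in this excerpt):
#
#   download_snapshot(QUEUE_REPO, EVAL_REQUESTS_PATH)
#   download_snapshot(RESULTS_REPO, EVAL_RESULTS_PATH)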
def restart_space():
"""Restart the Hugging Face space."""
try:
logger.info("Restarting space...")
API.restart_space(repo_id=REPO_ID)
except Exception as e:
logger.error(f"Error restarting space: {e}")
def create_title_html():
"""Function for title HTML."""
return """