import os

# Point the Hugging Face cache at a project-local directory. This must happen
# before huggingface_hub is imported, since the cache location is resolved at import time.
os.environ["HF_HOME"] = "src/data_cache"

import streamlit as st
import pandas as pd
import altair as alt
from huggingface_hub import HfApi, HfFileSystem
import json
from pathlib import Path
from typing import Dict, List, Optional
import numpy as np

st.set_page_config(
    page_title="Grounding Benchmark Leaderboard",
    page_icon="🎯",
    layout="wide"
)

REPO_ID = "mlfoundations-cua-dev/leaderboard"
GROUNDING_PATH = "grounding"
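
# Hard-coded reference scores for comparison models. These are drawn as "Baseline"
# bars next to evaluated models in the charts below.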
BASELINES = {
    "screenspot-v2": {
        "Qwen2-VL-7B": {
            "desktop_text": 52.01,
            "desktop_icon": 44.98,
            "web_text": 33.04,
            "web_icon": 21.84,
            "overall": 37.96
        },
        "UI-TARS-2B": {
            "desktop_text": 90.7,
            "desktop_icon": 68.6,
            "web_text": 87.2,
            "web_icon": 84.7,
            "overall": 82.8
        },
        "UI-TARS-7B": {
            "desktop_text": 95.4,
            "desktop_icon": 87.8,
            "web_text": 93.8,
            "web_icon": 91.6,
            "overall": 92.2
        },
        "UI-TARS-72B": {
            "desktop_text": 91.2,
            "desktop_icon": 87.8,
            "web_text": 87.7,
            "web_icon": 86.3,
            "overall": 88.3
        }
    },
    "screenspot-pro": {
        "Qwen2.5-VL-3B-Instruct": {
            "overall": 16.1
        },
        "Qwen2.5-VL-7B-Instruct": {
            "overall": 26.8
        },
        "Qwen2.5-VL-72B-Instruct": {
            "overall": 53.3
        },
        "UI-TARS-2B": {
            "overall": 27.7
        },
        "UI-TARS-7B": {
            "overall": 35.7
        },
        "UI-TARS-72B": {
            "overall": 38.1
        }
    }
}
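
# Illustrative sketch of the result-file layout that fetch_leaderboard_data() reads below,
# reconstructed from the fields this app accesses; actual files may contain additional keys
# and the example values are placeholders, not real results:
#
# {
#   "metadata": {
#     "model_checkpoint": "org/model/checkpoint-500",
#     "evaluation_timestamp": "...",
#     "checkpoint_steps": 500,
#     "training_loss": 0.42
#   },
#   "metrics": {"accuracy": 0.83, "total": 1272},
#   "detailed_results": {
#     "by_ui_type": {"desktop_text": {"correct": 120, "total": 140}, ...},
#     "by_dataset_type": {"<dataset>": {"by_ui_type": {...}}, ...}
#   }
# }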

@st.cache_data(ttl=300)
def fetch_leaderboard_data():
    """Fetch all grounding results from the HuggingFace leaderboard by streaming JSON files."""
    api = HfApi()
    fs = HfFileSystem()

    try:
        # List every grounding result file in the dataset repo
        files = api.list_repo_files(repo_id=REPO_ID, repo_type="dataset")
        grounding_files = [f for f in files if f.startswith(f"{GROUNDING_PATH}/") and f.endswith(".json")]

        results = []

        progress_bar = st.progress(0)
        status_text = st.empty()

        for idx, file_path in enumerate(grounding_files):
            try:
                progress = (idx + 1) / len(grounding_files)
                progress_bar.progress(progress)
                status_text.text(f"Loading {idx + 1}/{len(grounding_files)} files...")

                # Stream the JSON file directly from the Hub
                file_url = f"datasets/{REPO_ID}/{file_path}"
                with fs.open(file_url, 'r') as f:
                    data = json.load(f)

                metadata = data.get("metadata", {})
                metrics = data.get("metrics", {})
                detailed_results = data.get("detailed_results", {})

                # Dataset name is the first directory under the grounding path
                path_parts = file_path.split('/')
                dataset_name = path_parts[1] if len(path_parts) > 1 else "unknown"

                model_checkpoint = metadata.get("model_checkpoint", "")
                model_name = model_checkpoint.split('/')[-1]
                base_model_name = None
                is_checkpoint = False

                # Fall back to the file path when the metadata carries no model name
                if not model_name and len(path_parts) > 2:
                    if len(path_parts) > 3 and path_parts[2] != path_parts[3]:
                        # grounding/<dataset>/<model>/<checkpoint>.json
                        base_model_name = path_parts[2]
                        checkpoint_file = path_parts[3].replace(".json", "")
                        model_name = f"{base_model_name}/{checkpoint_file}"
                        is_checkpoint = True
                    else:
                        # grounding/<dataset>/results_<model>.json
                        model_name = path_parts[2].replace("results_", "").replace(".json", "")
                        base_model_name = model_name

                # Detect checkpoint entries and derive the base model name if still unknown
                if 'checkpoint-' in model_name:
                    is_checkpoint = True
                    if not base_model_name:
                        if '/' in model_name:
                            parts = model_name.split('/')
                            base_model_name = parts[0]
                        else:
                            checkpoint_parts = model_checkpoint.split('/')
                            if len(checkpoint_parts) > 1:
                                base_model_name = checkpoint_parts[-2]

                ui_type_results = detailed_results.get("by_ui_type", {})
                dataset_type_results = detailed_results.get("by_dataset_type", {})

                result_entry = {
                    "dataset": dataset_name,
                    "model": model_name,
                    "base_model": base_model_name or model_name,
                    "is_checkpoint": is_checkpoint,
                    "model_path": model_checkpoint,
                    "overall_accuracy": metrics.get("accuracy", 0) * 100,
                    "total_samples": metrics.get("total", 0),
                    "timestamp": metadata.get("evaluation_timestamp", ""),
                    "checkpoint_steps": metadata.get("checkpoint_steps"),
                    "training_loss": metadata.get("training_loss"),
                    "ui_type_results": ui_type_results,
                    "dataset_type_results": dataset_type_results
                }

                results.append(result_entry)

            except Exception as e:
                st.warning(f"Error loading {file_path}: {str(e)}")
                continue

        progress_bar.empty()
        status_text.empty()

        df = pd.DataFrame(results)

        if not df.empty:
            # Keep only the best checkpoint per (dataset, base model) pair
            grouped = df.groupby(['dataset', 'base_model'])

            best_models = []
            for (dataset, base_model), group in grouped:
                if len(group) > 1:
                    best_idx = group['overall_accuracy'].idxmax()
                    best_row = group.loc[best_idx].copy()

                    # Flag models whose best checkpoint is not the last checkpoint
                    checkpoint_steps = group[group['checkpoint_steps'].notna()]['checkpoint_steps'].sort_values()
                    if len(checkpoint_steps) > 0:
                        last_checkpoint_steps = checkpoint_steps.iloc[-1]
                        best_checkpoint_steps = best_row['checkpoint_steps']
                        if pd.notna(best_checkpoint_steps) and best_checkpoint_steps != last_checkpoint_steps:
                            best_row['model'] = best_row['model'] + '*'
                            best_row['is_best_not_last'] = True
                        else:
                            best_row['is_best_not_last'] = False
                    else:
                        best_row['is_best_not_last'] = False

                    best_row['all_checkpoints'] = group.to_dict('records')
                    best_models.append(best_row)
                else:
                    row = group.iloc[0].copy()
                    row['is_best_not_last'] = False
                    row['all_checkpoints'] = [row.to_dict()]
                    best_models.append(row)

            df_best = pd.DataFrame(best_models)
            return df_best

        return df

    except Exception as e:
        st.error(f"Error fetching leaderboard data: {str(e)}")
        return pd.DataFrame()
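
# Small readability helper introduced here (the name is ours, not part of the result schema):
# turn a {"correct": int, "total": int} bucket into a percentage, guarding against empty or
# missing buckets, exactly as the inline expressions in the parsing code below do.
def _ui_accuracy(ui_data: dict, key: str) -> float:
    bucket = ui_data.get(key, {})
    return bucket.get('correct', 0) / max(bucket.get('total', 1), 1) * 100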

def parse_ui_type_metrics(df: pd.DataFrame, dataset_filter: str) -> pd.DataFrame:
    """Parse UI type metrics from the results dataframe."""
    metrics_list = []

    for _, row in df.iterrows():
        if row['dataset'] != dataset_filter:
            continue

        model = row['model']
        ui_results = row.get('ui_type_results', {})
        dataset_type_results = row.get('dataset_type_results', {})

        if 'screenspot' in dataset_filter.lower():
            desktop_text = _ui_accuracy(ui_results, 'desktop_text')
            desktop_icon = _ui_accuracy(ui_results, 'desktop_icon')
            web_text = _ui_accuracy(ui_results, 'web_text')
            web_icon = _ui_accuracy(ui_results, 'web_icon')

            # Fall back to the per-dataset breakdown when the top-level UI-type results are empty
            if desktop_text == 0 and desktop_icon == 0 and web_text == 0 and web_icon == 0:
                for dataset_key in dataset_type_results:
                    if 'screenspot' in dataset_key.lower():
                        dataset_data = dataset_type_results[dataset_key]
                        if 'by_ui_type' in dataset_data:
                            ui_data = dataset_data['by_ui_type']
                            desktop_text = _ui_accuracy(ui_data, 'desktop_text')
                            desktop_icon = _ui_accuracy(ui_data, 'desktop_icon')
                            web_text = _ui_accuracy(ui_data, 'web_text')
                            web_icon = _ui_accuracy(ui_data, 'web_icon')
                        break

            desktop_avg = (desktop_text + desktop_icon) / 2 if (desktop_text > 0 or desktop_icon > 0) else 0
            web_avg = (web_text + web_icon) / 2 if (web_text > 0 or web_icon > 0) else 0
            text_avg = (desktop_text + web_text) / 2 if (desktop_text > 0 or web_text > 0) else 0
            icon_avg = (desktop_icon + web_icon) / 2 if (desktop_icon > 0 or web_icon > 0) else 0

            # screenspot-v2 reports the macro average of desktop and web; other variants use the raw accuracy
            if dataset_filter == 'screenspot-v2':
                overall = (desktop_avg + web_avg) / 2 if (desktop_avg > 0 or web_avg > 0) else row['overall_accuracy']
            else:
                overall = row['overall_accuracy']

            metrics_list.append({
                'model': model,
                'desktop_text': desktop_text,
                'desktop_icon': desktop_icon,
                'web_text': web_text,
                'web_icon': web_icon,
                'desktop_avg': desktop_avg,
                'web_avg': web_avg,
                'text_avg': text_avg,
                'icon_avg': icon_avg,
                'overall': overall,
                'is_best_not_last': row.get('is_best_not_last', False),
                'all_checkpoints': row.get('all_checkpoints', [])
            })
        else:
            metrics_list.append({
                'model': model,
                'overall': row['overall_accuracy'],
                'is_best_not_last': row.get('is_best_not_last', False),
                'all_checkpoints': row.get('all_checkpoints', [])
            })

    return pd.DataFrame(metrics_list)

def create_bar_chart(data: pd.DataFrame, metric: str, title: str):
    """Create a bar chart for a specific metric."""
    chart_data = []

    # Evaluated models
    for _, row in data.iterrows():
        if metric in row and row[metric] > 0:
            chart_data.append({
                'Model': row['model'],
                'Score': row[metric],
                'Type': 'Evaluated'
            })

    # Reference baselines for the currently selected dataset
    dataset = st.session_state.get('selected_dataset', '')
    if dataset in BASELINES:
        for baseline_name, baseline_metrics in BASELINES[dataset].items():
            metric_key = metric.replace('_avg', '').replace('avg', 'overall')
            if metric_key in baseline_metrics:
                chart_data.append({
                    'Model': baseline_name,
                    'Score': baseline_metrics[metric_key],
                    'Type': 'Baseline'
                })

    if not chart_data:
        return None

    df_chart = pd.DataFrame(chart_data)

    chart = alt.Chart(df_chart).mark_bar().encode(
        x=alt.X('Model:N',
                sort=alt.EncodingSortField(field='Score', order='descending'),
                axis=alt.Axis(labelAngle=-45)),
        y=alt.Y('Score:Q',
                scale=alt.Scale(domain=[0, 100]),
                axis=alt.Axis(title='Score (%)')),
        color=alt.Color('Type:N',
                        scale=alt.Scale(domain=['Evaluated', 'Baseline'],
                                        range=['#4ECDC4', '#FFA726'])),
        tooltip=['Model', 'Score', 'Type']
    ).properties(
        title=title,
        width=500,
        height=400
    )

    # Score labels above each bar
    text = chart.mark_text(
        align='center',
        baseline='bottom',
        dy=-5
    ).encode(
        text=alt.Text('Score:Q', format='.1f')
    )

    return chart + text

def main():
    st.title("🎯 Grounding Benchmark Leaderboard")
    st.markdown("Visualization of model performance on grounding benchmarks")

    with st.spinner("Loading leaderboard data..."):
        df = fetch_leaderboard_data()

    if df.empty:
        st.warning("No data available in the leaderboard.")
        return

    st.sidebar.header("Filters")

    datasets = sorted(df['dataset'].unique())
    selected_dataset = st.sidebar.selectbox("Select Dataset", datasets)
    st.session_state['selected_dataset'] = selected_dataset

    filtered_df = df[df['dataset'] == selected_dataset]

    models = ['All'] + sorted(filtered_df['model'].unique())
    selected_model = st.sidebar.selectbox("Select Model", models)

    if selected_model != 'All':
        filtered_df = filtered_df[filtered_df['model'] == selected_model]

    st.header(f"Results for {selected_dataset}")
    # UI-type metrics are needed both by the debug view and by the charts further down,
    # so compute them before the expander references them.
    ui_metrics_df = parse_ui_type_metrics(filtered_df, selected_dataset)

    with st.expander("Debug Information"):
        st.write(f"Total rows in filtered_df: {len(filtered_df)}")
        st.write(f"Total rows in ui_metrics_df: {len(ui_metrics_df)}")
        if not filtered_df.empty:
            st.write("Sample data from filtered_df:")
            st.write(filtered_df[['model', 'base_model', 'is_checkpoint', 'overall_accuracy']].head())

            st.write("\nUI Type Results Structure:")
            for idx, row in filtered_df.head(2).iterrows():
                st.write(f"\nModel: {row['model']}")
                ui_results = row.get('ui_type_results', {})
                if ui_results:
                    st.write("UI Type Keys:", list(ui_results.keys()))
                    for key in list(ui_results.keys())[:2]:
                        st.write(f"  {key}: {ui_results[key]}")
                else:
                    st.write("  No UI type results found")

                dataset_type_results = row.get('dataset_type_results', {})
                if dataset_type_results:
                    st.write("Dataset Type Results Keys:", list(dataset_type_results.keys()))
                    for key in list(dataset_type_results.keys())[:2]:
                        st.write(f"  {key}: {dataset_type_results[key]}")

        if not ui_metrics_df.empty:
            st.write("\nSample data from ui_metrics_df:")
            # Non-screenspot datasets do not carry the per-UI-type columns, so only show what exists
            preview_cols = [c for c in ['model', 'overall', 'desktop_avg', 'web_avg'] if c in ui_metrics_df.columns]
            st.write(ui_metrics_df[preview_cols].head())
    # Summary metrics
    col1, col2, col3 = st.columns(3)
    with col1:
        st.metric("Models Evaluated", len(filtered_df))
    with col2:
        if not filtered_df.empty:
            best_acc = filtered_df['overall_accuracy'].max()
            best_model = filtered_df[filtered_df['overall_accuracy'] == best_acc]['model'].iloc[0]
            st.metric("Best Overall Accuracy", f"{best_acc:.1f}%", help=f"Model: {best_model}")
    with col3:
        total_samples = filtered_df['total_samples'].sum()
        st.metric("Total Samples Evaluated", f"{total_samples:,}")

    # Default metric when the UI-type selector below is not rendered
    selected_metric = 'overall'
    if not ui_metrics_df.empty and 'screenspot' in selected_dataset.lower():
        st.subheader("Performance by UI Type")

        # screenspot-v2 exposes the full per-UI-type breakdown; other screenspot variants only get the averages
        if selected_dataset == 'screenspot-v2':
            metric_options = {
                'overall': 'Overall Average (Desktop + Web) / 2',
                'desktop_avg': 'Desktop Average',
                'web_avg': 'Web Average',
                'desktop_text': 'Desktop (Text)',
                'desktop_icon': 'Desktop (Icon)',
                'web_text': 'Web (Text)',
                'web_icon': 'Web (Icon)',
                'text_avg': 'Text Average',
                'icon_avg': 'Icon Average'
            }
        else:
            metric_options = {
                'overall': 'Overall Average',
                'desktop_avg': 'Desktop Average',
                'web_avg': 'Web Average',
                'text_avg': 'Text Average',
                'icon_avg': 'Icon Average'
            }

        selected_metric = st.selectbox(
            "Select metric to visualize:",
            options=list(metric_options.keys()),
            format_func=lambda x: metric_options[x],
            key="metric_selector"
        )

        if any(ui_metrics_df['is_best_not_last']):
            st.info("* indicates the best checkpoint is not the last checkpoint")

        chart = create_bar_chart(ui_metrics_df, selected_metric, metric_options[selected_metric])
        if chart:
            st.altair_chart(chart, use_container_width=True)
        else:
            st.warning(f"No data available for {metric_options[selected_metric]}")
        with st.expander("View All Metrics"):
            if selected_dataset == 'screenspot-v2':
                # Top-level averages
                for col, (metric, title) in zip(
                    st.columns(3),
                    [('overall', 'Overall Average (Desktop + Web) / 2'),
                     ('desktop_avg', 'Desktop Average'),
                     ('web_avg', 'Web Average')]
                ):
                    with col:
                        chart = create_bar_chart(ui_metrics_df, metric, title)
                        if chart:
                            st.altair_chart(chart, use_container_width=True)

                # Per-UI-type breakdown
                for col, (metric, title) in zip(
                    st.columns(4),
                    [('desktop_text', 'Desktop (Text)'),
                     ('desktop_icon', 'Desktop (Icon)'),
                     ('web_text', 'Web (Text)'),
                     ('web_icon', 'Web (Icon)')]
                ):
                    with col:
                        chart = create_bar_chart(ui_metrics_df, metric, title)
                        if chart:
                            st.altair_chart(chart, use_container_width=True)

                # Text vs. icon averages
                for col, (metric, title) in zip(
                    st.columns(2),
                    [('text_avg', 'Text Average (Desktop + Web)'),
                     ('icon_avg', 'Icon Average (Desktop + Web)')]
                ):
                    with col:
                        chart = create_bar_chart(ui_metrics_df, metric, title)
                        if chart:
                            st.altair_chart(chart, use_container_width=True)
            else:
                col1, col2 = st.columns(2)

                with col1:
                    for metric, title in [('overall', 'Overall Average'),
                                          ('desktop_avg', 'Desktop Average'),
                                          ('text_avg', 'Text Average (UI-Type)')]:
                        chart = create_bar_chart(ui_metrics_df, metric, title)
                        if chart:
                            st.altair_chart(chart, use_container_width=True)

                with col2:
                    for metric, title in [('web_avg', 'Web Average'),
                                          ('icon_avg', 'Icon Average (UI-Type)')]:
                        chart = create_bar_chart(ui_metrics_df, metric, title)
                        if chart:
                            st.altair_chart(chart, use_container_width=True)
        with st.expander("Checkpoint Progression Analysis"):
            # Only models evaluated at more than one checkpoint are interesting here
            models_with_checkpoints = ui_metrics_df[ui_metrics_df['all_checkpoints'].apply(lambda x: len(x) > 1)]

            if not models_with_checkpoints.empty:
                selected_checkpoint_model = st.selectbox(
                    "Select a model to view checkpoint progression:",
                    models_with_checkpoints['model'].str.replace('*', '', regex=False).unique()
                )

                model_row = models_with_checkpoints[
                    models_with_checkpoints['model'].str.replace('*', '', regex=False) == selected_checkpoint_model
                ].iloc[0]
                checkpoint_data = model_row['all_checkpoints']

                checkpoint_df = pd.DataFrame(checkpoint_data)

                # Recompute UI-type metrics for every checkpoint of the selected model
                checkpoint_metrics = []
                for _, cp in checkpoint_df.iterrows():
                    ui_results = cp.get('ui_type_results', {})
                    dataset_type_results = cp.get('dataset_type_results', {})

                    desktop_text = _ui_accuracy(ui_results, 'desktop_text')
                    desktop_icon = _ui_accuracy(ui_results, 'desktop_icon')
                    web_text = _ui_accuracy(ui_results, 'web_text')
                    web_icon = _ui_accuracy(ui_results, 'web_icon')

                    # Fall back to the per-dataset breakdown when the top-level UI-type results are empty
                    if desktop_text == 0 and desktop_icon == 0 and web_text == 0 and web_icon == 0:
                        for dataset_key in dataset_type_results:
                            if 'screenspot' in dataset_key.lower():
                                dataset_data = dataset_type_results[dataset_key]
                                if 'by_ui_type' in dataset_data:
                                    ui_data = dataset_data['by_ui_type']
                                    desktop_text = _ui_accuracy(ui_data, 'desktop_text')
                                    desktop_icon = _ui_accuracy(ui_data, 'desktop_icon')
                                    web_text = _ui_accuracy(ui_data, 'web_text')
                                    web_icon = _ui_accuracy(ui_data, 'web_icon')
                                break

                    desktop_avg = (desktop_text + desktop_icon) / 2
                    web_avg = (web_text + web_icon) / 2
                    text_avg = (desktop_text + web_text) / 2
                    icon_avg = (desktop_icon + web_icon) / 2
                    overall = (desktop_avg + web_avg) / 2 if selected_dataset == 'screenspot-v2' else cp['overall_accuracy']

                    checkpoint_metrics.append({
                        'steps': cp['checkpoint_steps'] if pd.notna(cp['checkpoint_steps']) else 0,
                        'overall': overall,
                        'desktop_avg': desktop_avg,
                        'web_avg': web_avg,
                        'desktop_text': desktop_text,
                        'desktop_icon': desktop_icon,
                        'web_text': web_text,
                        'web_icon': web_icon,
                        'text_avg': text_avg,
                        'icon_avg': icon_avg,
                        'loss': cp['training_loss'],
                        'neg_log_loss': -np.log(cp['training_loss']) if pd.notna(cp['training_loss']) and cp['training_loss'] > 0 else None
                    })

                metrics_df = pd.DataFrame(checkpoint_metrics).sort_values('steps')

                col1, col2 = st.columns(2)

                with col1:
                    st.write("**Accuracy over Training Steps**")

                    # Plot a family of related metrics alongside the selected one
                    if selected_metric == 'overall':
                        metrics_to_show = ['overall', 'desktop_avg', 'web_avg']
                        metric_labels = ['Overall', 'Desktop Avg', 'Web Avg']
                        colors = ['#4ECDC4', '#45B7D1', '#96CEB4']
                    elif 'desktop' in selected_metric:
                        metrics_to_show = ['desktop_avg', 'desktop_text', 'desktop_icon']
                        metric_labels = ['Desktop Avg', 'Desktop Text', 'Desktop Icon']
                        colors = ['#45B7D1', '#FFA726', '#FF6B6B']
                    elif 'web' in selected_metric:
                        metrics_to_show = ['web_avg', 'web_text', 'web_icon']
                        metric_labels = ['Web Avg', 'Web Text', 'Web Icon']
                        colors = ['#96CEB4', '#9C27B0', '#E91E63']
                    elif 'text' in selected_metric:
                        metrics_to_show = ['text_avg', 'desktop_text', 'web_text']
                        metric_labels = ['Text Avg', 'Desktop Text', 'Web Text']
                        colors = ['#FF9800', '#FFA726', '#FFB74D']
                    elif 'icon' in selected_metric:
                        metrics_to_show = ['icon_avg', 'desktop_icon', 'web_icon']
                        metric_labels = ['Icon Avg', 'Desktop Icon', 'Web Icon']
                        colors = ['#3F51B5', '#5C6BC0', '#7986CB']
                    else:
                        metrics_to_show = [selected_metric]
                        metric_labels = [metric_options.get(selected_metric, selected_metric)]
                        colors = ['#4ECDC4']

                    chart_data = []
                    for i, (metric, label) in enumerate(zip(metrics_to_show, metric_labels)):
                        for _, row in metrics_df.iterrows():
                            if metric in row:
                                chart_data.append({
                                    'steps': row['steps'],
                                    'value': row[metric],
                                    'metric': label,
                                    'color_idx': i
                                })

                    if chart_data:
                        chart_df = pd.DataFrame(chart_data)

                        chart = alt.Chart(chart_df).mark_line(point=True, strokeWidth=2).encode(
                            x=alt.X('steps:Q', title='Training Steps'),
                            y=alt.Y('value:Q', scale=alt.Scale(domain=[0, 100]), title='Accuracy (%)'),
                            color=alt.Color('metric:N',
                                            scale=alt.Scale(domain=metric_labels, range=colors),
                                            legend=alt.Legend(title="Metric")),
                            tooltip=['steps:Q', 'metric:N', alt.Tooltip('value:Q', format='.1f', title='Accuracy')]
                        ).properties(
                            width=500,
                            height=400,
                            title='Accuracy Progression During Training'
                        )
                        st.altair_chart(chart, use_container_width=True)
                    else:
                        st.warning("No data available for the selected metrics")

                with col2:
                    st.write(f"**{metric_options[selected_metric]} vs. Training Loss**")

                    if metrics_df['neg_log_loss'].notna().any():
                        scatter_data = metrics_df[metrics_df['neg_log_loss'].notna()]

                        chart = alt.Chart(scatter_data).mark_circle(size=100).encode(
                            x=alt.X('neg_log_loss:Q', title='-log(Training Loss)'),
                            y=alt.Y(f'{selected_metric}:Q', scale=alt.Scale(domain=[0, 100]), title=f'{metric_options[selected_metric]} (%)'),
                            color=alt.Color('steps:Q', scale=alt.Scale(scheme='viridis'), title='Training Steps'),
                            tooltip=['steps', 'loss', selected_metric]
                        ).properties(
                            width=500,
                            height=400,
                            title=f'{metric_options[selected_metric]} vs. -log(Training Loss)'
                        )
                        st.altair_chart(chart, use_container_width=True)
                    else:
                        st.info("No training loss data available for this model")

                st.write("**Checkpoint Details**")

                # Show the columns most relevant to the selected metric
                if selected_metric == 'overall':
                    display_cols = ['steps', 'overall', 'desktop_avg', 'web_avg', 'loss']
                    col_labels = ['Steps', 'Overall %', 'Desktop Avg %', 'Web Avg %', 'Training Loss']
                elif 'desktop' in selected_metric:
                    display_cols = ['steps', 'desktop_avg', 'desktop_text', 'desktop_icon', 'loss']
                    col_labels = ['Steps', 'Desktop Avg %', 'Desktop Text %', 'Desktop Icon %', 'Training Loss']
                elif 'web' in selected_metric:
                    display_cols = ['steps', 'web_avg', 'web_text', 'web_icon', 'loss']
                    col_labels = ['Steps', 'Web Avg %', 'Web Text %', 'Web Icon %', 'Training Loss']
                elif 'text' in selected_metric:
                    display_cols = ['steps', 'text_avg', 'desktop_text', 'web_text', 'loss']
                    col_labels = ['Steps', 'Text Avg %', 'Desktop Text %', 'Web Text %', 'Training Loss']
                elif 'icon' in selected_metric:
                    display_cols = ['steps', 'icon_avg', 'desktop_icon', 'web_icon', 'loss']
                    col_labels = ['Steps', 'Icon Avg %', 'Desktop Icon %', 'Web Icon %', 'Training Loss']
                else:
                    display_cols = ['steps', selected_metric, 'loss']
                    col_labels = ['Steps', f'{metric_options[selected_metric]} %', 'Training Loss']

                display_metrics = metrics_df[display_cols].copy()
                display_metrics.columns = col_labels

                for col in col_labels:
                    if '%' in col:
                        display_metrics[col] = display_metrics[col].round(2)

                display_metrics['Training Loss'] = display_metrics['Training Loss'].apply(lambda x: f"{x:.4f}" if pd.notna(x) else "N/A")
                st.dataframe(display_metrics, use_container_width=True)
            else:
                st.info("No models with multiple checkpoints available for progression analysis")
        if selected_dataset == 'screenspot-v2':
            with st.expander("Detailed UI Type Breakdown"):
                detailed_metrics = []
                for _, row in ui_metrics_df.iterrows():
                    detailed_metrics.append({
                        'Model': row['model'],
                        'Desktop Text': f"{row['desktop_text']:.1f}%",
                        'Desktop Icon': f"{row['desktop_icon']:.1f}%",
                        'Web Text': f"{row['web_text']:.1f}%",
                        'Web Icon': f"{row['web_icon']:.1f}%",
                        'Overall': f"{row['overall']:.1f}%"
                    })

                if detailed_metrics:
                    st.dataframe(pd.DataFrame(detailed_metrics), use_container_width=True)
    else:
        # Datasets without a UI-type breakdown: plot overall accuracy only
        st.subheader("Model Performance")

        chart_data = filtered_df[['model', 'overall_accuracy']].copy()
        chart_data.columns = ['Model', 'Accuracy']

        chart = alt.Chart(chart_data).mark_bar().encode(
            x=alt.X('Model:N', sort='-y', axis=alt.Axis(labelAngle=-45)),
            y=alt.Y('Accuracy:Q', scale=alt.Scale(domain=[0, 100])),
            tooltip=['Model', 'Accuracy']
        ).properties(
            width=800,
            height=400
        )

        st.altair_chart(chart, use_container_width=True)

    # Raw per-model details, shown for every dataset
    with st.expander("Model Details"):
        display_df = filtered_df[['model', 'overall_accuracy', 'total_samples', 'checkpoint_steps', 'training_loss', 'timestamp']].copy()
        display_df.columns = ['Model', 'Accuracy (%)', 'Samples', 'Checkpoint Steps', 'Training Loss', 'Timestamp']
        display_df['Accuracy (%)'] = display_df['Accuracy (%)'].apply(lambda x: f"{x:.2f}")
        display_df['Training Loss'] = display_df['Training Loss'].apply(lambda x: f"{x:.4f}" if pd.notna(x) else "N/A")
        st.dataframe(display_df, use_container_width=True)


if __name__ == "__main__":
    main()