# src/leaderboard.py
import datetime
import json
from typing import Dict

import pandas as pd
from datasets import Dataset, load_dataset

from config import (
    LEADERBOARD_DATASET,
    HF_TOKEN,
    EVALUATION_TRACKS,
    MODEL_CATEGORIES,
)
from src.utils import create_submission_id, sanitize_model_name


def initialize_leaderboard() -> pd.DataFrame:
    """Initialize empty leaderboard DataFrame with all required columns."""
    
    columns = {
        # Basic information
        "submission_id": [],
        "model_name": [],
        "author": [],
        "submission_date": [],
        "model_category": [],
        "description": [],
        
        # Track-specific quality scores
        "google_comparable_quality": [],
        "ug40_complete_quality": [],
        
        # Track-specific BLEU scores
        "google_comparable_bleu": [],
        "ug40_complete_bleu": [],
        
        # Track-specific ChrF scores
        "google_comparable_chrf": [],
        "ug40_complete_chrf": [],
        
        # Confidence intervals
        "google_comparable_ci_lower": [],
        "google_comparable_ci_upper": [],
        "ug40_complete_ci_lower": [],
        "ug40_complete_ci_upper": [],
        
        # Coverage information
        "google_comparable_samples": [],
        "ug40_complete_samples": [],
        "google_comparable_pairs": [],
        "ug40_complete_pairs": [],
        
        # Detailed results (JSON strings)
        "detailed_google_comparable": [],
        "detailed_ug40_complete": [],
        
        # Metadata
        "evaluation_date": [],
    }
    
    return pd.DataFrame(columns)


def load_leaderboard() -> pd.DataFrame:
    """Load current leaderboard from HuggingFace dataset."""
    
    try:
        print("πŸ“₯ Loading leaderboard...")
        dataset = load_dataset(LEADERBOARD_DATASET, split="train", token=HF_TOKEN)
        df = dataset.to_pandas()
        
        # Ensure all required columns exist
        required_columns = list(initialize_leaderboard().columns)
        for col in required_columns:
            if col not in df.columns:
                if "quality" in col or "bleu" in col or "chrf" in col or "ci_" in col:
                    df[col] = 0.0
                elif "samples" in col or "pairs" in col:
                    df[col] = 0
                else:
                    df[col] = ""
        
        # Ensure proper data types for numeric columns
        numeric_columns = [
            col for col in df.columns 
            if any(x in col for x in ["quality", "bleu", "chrf", "ci_", "samples", "pairs"])
        ]
        for col in numeric_columns:
            df[col] = pd.to_numeric(df[col], errors="coerce").fillna(0.0)
        
        print(f"βœ… Loaded leaderboard with {len(df)} entries")
        return df
        
    except Exception as e:
        print(f"⚠️ Could not load leaderboard: {e}")
        print("πŸ”„ Initializing empty leaderboard...")
        return initialize_leaderboard()


def save_leaderboard(df: pd.DataFrame) -> bool:
    """Save leaderboard to HuggingFace dataset."""
    
    try:
        # Clean data before saving
        df_clean = df.copy()
        
        # Ensure numeric columns are proper types
        numeric_columns = [
            col for col in df_clean.columns 
            if any(x in col for x in ["quality", "bleu", "chrf", "ci_", "samples", "pairs"])
        ]
        
        for col in numeric_columns:
            if col in df_clean.columns:
                df_clean[col] = pd.to_numeric(df_clean[col], errors="coerce").fillna(0.0)
        
        # Convert to a datasets.Dataset; drop the pandas index so it is not
        # persisted as an extra "__index_level_0__" column
        dataset = Dataset.from_pandas(df_clean, preserve_index=False)
        
        # Push to hub
        dataset.push_to_hub(
            LEADERBOARD_DATASET,
            token=HF_TOKEN,
            commit_message=f"Update leaderboard - {datetime.datetime.now().isoformat()[:19]}",
        )
        
        print("βœ… Leaderboard saved successfully!")
        return True
        
    except Exception as e:
        print(f"❌ Error saving leaderboard: {e}")
        return False


def add_model_to_leaderboard(
    model_name: str,
    author: str,
    evaluation_results: Dict,
    model_category: str = "community",
    description: str = "",
) -> pd.DataFrame:
    """Add new model results to leaderboard."""
    
    # Load current leaderboard
    df = load_leaderboard()
    
    # Remove any existing entry for this model. Compare against the sanitized
    # name, since that is the form stored in the leaderboard.
    sanitized_name = sanitize_model_name(model_name)
    existing_mask = df["model_name"] == sanitized_name
    if existing_mask.any():
        df = df[~existing_mask]

    # Extract track results
    tracks = evaluation_results.get("tracks", {})

    # Prepare new entry
    new_entry = {
        "submission_id": create_submission_id(),
        "model_name": sanitized_name,
        "author": author[:100] if author else "Anonymous",
        "submission_date": datetime.datetime.now().isoformat(),
        "model_category": model_category if model_category in MODEL_CATEGORIES else "community",
        "description": description[:500] if description else "",
        
        # Extract track-specific metrics
        **extract_track_metrics(tracks),
        
        # Confidence intervals
        **extract_confidence_intervals(tracks),
        
        # Coverage information
        **extract_coverage_information(tracks),
        
        # Detailed results (JSON strings)
        **serialize_detailed_results(tracks),
        
        # Metadata
        "evaluation_date": datetime.datetime.now().isoformat(),
    }
    
    # Convert to DataFrame and append
    new_row_df = pd.DataFrame([new_entry])
    updated_df = pd.concat([df, new_row_df], ignore_index=True)
    
    # Save to hub
    save_leaderboard(updated_df)
    
    return updated_df


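# The extractor helpers below assume an evaluation payload with roughly this
# shape — a sketch inferred from the lookups they perform, not a guaranteed
# schema (the numeric values are placeholders):
#
#   {
#       "tracks": {
#           "google_comparable": {
#               "track_averages": {"quality_score": 0.71, "bleu": 24.3, "chrf": 0.52},
#               "track_confidence": {"quality_score": {"ci_lower": 0.69, "ci_upper": 0.73}},
#               "summary": {"total_samples": 5000, "language_pairs_evaluated": 12},
#               "pair_metrics": {...},
#           },
#           "ug40_complete": {...},
#       }
#   }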
def extract_track_metrics(tracks: Dict) -> Dict:
    """Extract primary metrics from each track."""
    
    metrics = {}
    
    for track_name in EVALUATION_TRACKS.keys():
        track_data = tracks.get(track_name, {})
        track_averages = track_data.get("track_averages", {})
        
        # Quality score
        metrics[f"{track_name}_quality"] = float(track_averages.get("quality_score", 0.0))
        
        # BLEU score
        metrics[f"{track_name}_bleu"] = float(track_averages.get("bleu", 0.0))
        
        # ChrF score
        metrics[f"{track_name}_chrf"] = float(track_averages.get("chrf", 0.0))
    
    return metrics


def extract_confidence_intervals(tracks: Dict) -> Dict:
    """Extract confidence intervals from each track."""
    
    ci_data = {}
    
    for track_name in EVALUATION_TRACKS.keys():
        track_data = tracks.get(track_name, {})
        track_confidence = track_data.get("track_confidence", {})
        
        quality_stats = track_confidence.get("quality_score", {})
        ci_data[f"{track_name}_ci_lower"] = float(quality_stats.get("ci_lower", 0.0))
        ci_data[f"{track_name}_ci_upper"] = float(quality_stats.get("ci_upper", 0.0))
    
    return ci_data


def extract_coverage_information(tracks: Dict) -> Dict:
    """Extract coverage information from each track."""
    
    coverage = {}
    
    for track_name in EVALUATION_TRACKS.keys():
        track_data = tracks.get(track_name, {})
        summary = track_data.get("summary", {})
        
        coverage[f"{track_name}_samples"] = int(summary.get("total_samples", 0))
        coverage[f"{track_name}_pairs"] = int(summary.get("language_pairs_evaluated", 0))
    
    return coverage


def serialize_detailed_results(tracks: Dict) -> Dict:
    """Serialize detailed results for storage."""
    
    detailed = {}
    
    for track_name in EVALUATION_TRACKS.keys():
        track_data = tracks.get(track_name, {})
        
        # Create simplified detailed results for storage
        simple_track_data = {
            "pair_metrics": track_data.get("pair_metrics", {}),
            "track_averages": track_data.get("track_averages", {}),
            "track_confidence": track_data.get("track_confidence", {}),
            "summary": track_data.get("summary", {})
        }
        
        detailed[f"detailed_{track_name}"] = json.dumps(simple_track_data)
    
    return detailed

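
# Illustrative helper (a sketch, not part of the original module): the inverse
# of serialize_detailed_results, assuming the stored JSON blobs were produced
# by the serializer above.
def deserialize_detailed_results(row: Dict) -> Dict:
    """Parse the per-track JSON blobs written by serialize_detailed_results."""
    tracks = {}
    for track_name in EVALUATION_TRACKS.keys():
        raw = row.get(f"detailed_{track_name}", "")
        try:
            # TypeError covers non-string values; JSONDecodeError covers bad JSON
            tracks[track_name] = json.loads(raw) if raw else {}
        except (TypeError, json.JSONDecodeError):
            tracks[track_name] = {}
    return tracks
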

def get_track_leaderboard(
    df: pd.DataFrame, 
    track: str, 
    metric: str = "quality",
    category_filter: str = "all"
) -> pd.DataFrame:
    """Get leaderboard for a specific track with filtering."""
    
    if df.empty:
        return df
    
    track_metric_col = f"{track}_{metric}"

    # Ensure the requested metric column exists
    if track_metric_col not in df.columns:
        print(f"Warning: column '{track_metric_col}' not found in leaderboard")
        return pd.DataFrame()

    # Filter by category
    if category_filter != "all":
        df = df[df["model_category"] == category_filter]

    # Filter to models that have a score on this track
    metric_mask = pd.to_numeric(df[track_metric_col], errors="coerce") > 0
    df = df[metric_mask]

    if df.empty:
        return df

    # Sort by the track-specific metric, best first
    df = df.sort_values(track_metric_col, ascending=False).reset_index(drop=True)
    
    return df


def prepare_leaderboard_display(df: pd.DataFrame, track: str) -> pd.DataFrame:
    """Prepare track-specific leaderboard for display."""
    
    if df.empty:
        return df
    
    # Select relevant columns for this track
    base_columns = ["model_name", "author", "submission_date", "model_category"]
    
    track_columns = [
        f"{track}_quality",
        f"{track}_bleu", 
        f"{track}_chrf",
        f"{track}_ci_lower",
        f"{track}_ci_upper",
        f"{track}_samples",
        f"{track}_pairs",
    ]
    
    # Only include columns that exist
    available_columns = [col for col in base_columns + track_columns if col in df.columns]
    display_df = df[available_columns].copy()
    
    # Format numeric columns
    numeric_format = {
        f"{track}_quality": "{:.4f}",
        f"{track}_bleu": "{:.2f}",
        f"{track}_chrf": "{:.4f}",
        f"{track}_ci_lower": "{:.4f}",
        f"{track}_ci_upper": "{:.4f}",
    }
    
    for col, fmt in numeric_format.items():
        if col in display_df.columns:
            # Bind fmt as a default argument and format the fallback with the
            # same spec so missing values match the column's precision
            display_df[col] = display_df[col].apply(
                lambda x, fmt=fmt: fmt.format(float(x)) if pd.notnull(x) else fmt.format(0.0)
            )
    
    # Format confidence intervals
    if f"{track}_ci_lower" in display_df.columns and f"{track}_ci_upper" in display_df.columns:
        display_df[f"{track}_confidence_interval"] = (
            "[" + display_df[f"{track}_ci_lower"] + ", " + display_df[f"{track}_ci_upper"] + "]"
        )
        # Remove individual CI columns for cleaner display
        display_df = display_df.drop(columns=[f"{track}_ci_lower", f"{track}_ci_upper"])
    
    # Format submission date (coerce malformed dates instead of raising)
    if "submission_date" in display_df.columns:
        display_df["submission_date"] = (
            pd.to_datetime(display_df["submission_date"], errors="coerce")
            .dt.strftime("%Y-%m-%d")
            .fillna("")
        )
    
    # Rename columns for better display; fall back to the raw track key if the
    # track is not registered in EVALUATION_TRACKS
    track_name = EVALUATION_TRACKS.get(track, {}).get("name", track).split()[0]  # First word
    column_renames = {
        "model_name": "Model Name",
        "author": "Author",
        "submission_date": "Submitted",
        "model_category": "Category",
        f"{track}_quality": f"{track_name} Quality",
        f"{track}_bleu": f"{track_name} BLEU",
        f"{track}_chrf": f"{track_name} ChrF",
        f"{track}_confidence_interval": "95% CI",
        f"{track}_samples": "Samples",
        f"{track}_pairs": "Pairs",
    }
    
    display_df = display_df.rename(columns=column_renames)
    
    return display_df
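

# Minimal usage sketch (illustrative; assumes HF_TOKEN is set and the
# LEADERBOARD_DATASET exists on the Hub):
if __name__ == "__main__":
    board = load_leaderboard()
    ranked = get_track_leaderboard(board, track="google_comparable", metric="quality")
    print(prepare_leaderboard_display(ranked, track="google_comparable"))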