joshause committed on
Commit cf27955 · verified · 1 Parent(s): e460e98

Create app.py

Files changed (1):
  app.py +478 -0

app.py ADDED
@@ -0,0 +1,478 @@
+ import gradio as gr
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from peft import PeftModel
+ from datasets import load_dataset
+ import numpy as np
+ from tqdm import tqdm
+ import json
+ from huggingface_hub import hf_hub_download, login
+ import os
+
+ # Retrieve the token from the environment variable
+ hf_token = os.environ.get("HF_TOKEN")
+ if hf_token:
+     login(token=hf_token)
+     print("Successfully logged in to Hugging Face Hub!")
+ else:
+     print("HF_TOKEN not found in environment variables. Cannot authenticate.")
+
+ def load_models(target_model, target_adapter):
+     """Load base model and fine-tuned adapter"""
+     base_model_name = target_model
+     adapter_name = target_adapter
+
+     # Load tokenizer
+     tokenizer = AutoTokenizer.from_pretrained(base_model_name)
+     tokenizer.pad_token = tokenizer.eos_token
+
+     # Load base model
+     base_model = AutoModelForCausalLM.from_pretrained(
+         base_model_name,
+         torch_dtype=torch.bfloat16,
+         device_map="auto"
+     )
+
+     # Load fine-tuned model
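+     # Note: PeftModel.from_pretrained injects the adapter layers into `base_model` in place,
+     # so the two returned models share weights. For a strictly independent base-model score,
+     # consider loading a separate copy of the base model, or evaluating the base inside
+     # `ft_model.disable_adapter()`.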
+     ft_model = PeftModel.from_pretrained(base_model, adapter_name)
+
+     return tokenizer, base_model, ft_model
+
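+ # Scoring approach: rather than generating text, compare the model's next-token logits for
+ # the answer letters "A"-"D" and take the highest-scoring letter as the prediction, so each
+ # MMLU question costs a single forward pass.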
+ def evaluate_mmlu_sample(model, tokenizer, question, choices, answer_idx):
+     """Evaluate a single MMLU sample"""
+
+     # Format prompt with choices
+     prompt = f"Question: {question}\n"
+     for i, choice in enumerate(choices):
+         prompt += f"{chr(65+i)}: {choice}\n"
+     prompt += "Answer:"
+
+     # Tokenize and move inputs to the model's device (matters when the model is on GPU)
+     inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
+     inputs = {k: v.to(model.device) for k, v in inputs.items()}
+
+     with torch.no_grad():
+         # Get logits for next token
+         outputs = model(**inputs)
+         logits = outputs.logits[0, -1, :]  # Last token logits
+
+     # Get logits for the A, B, C, D answer tokens
+     choice_tokens = [tokenizer.encode(chr(65+i), add_special_tokens=False)[0] for i in range(4)]
+     choice_logits = [logits[token].item() for token in choice_tokens]
+
+     # Predict choice with highest logit
+     predicted_idx = np.argmax(choice_logits)
+
+     return predicted_idx == answer_idx
+
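+ # Subset construction: `count` is the total question budget. It is split evenly across the
+ # MMLU subjects (stratified random sampling), so every subject contributes the same number
+ # of questions regardless of how many it has in the full test split.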
+ def run_mmlu_evaluation(model, tokenizer, model_name, count):
+     """Run MMLU evaluation on a balanced subset across all subjects"""
+
+     # Load MMLU dataset
+     dataset = load_dataset("cais/mmlu", "all", split="test")
+
+     # Get all unique subjects
+     all_subjects = list(set(dataset['subject']))
+     num_subjects = len(all_subjects)
+
+     # Calculate samples per subject (rounded down)
+     samples_per_subject = int(np.floor(count / num_subjects))
+
+     print(f"Found {num_subjects} subjects in MMLU dataset")
+     print(f"Sampling {samples_per_subject} questions per subject")
+     print(f"Total samples: {samples_per_subject * num_subjects}")
+
+     # Create balanced subset
+     balanced_indices = []
+
+     for subject in all_subjects:
+         # Get all indices for this subject
+         subject_indices = [i for i, subj in enumerate(dataset['subject']) if subj == subject]
+
+         # Sample up to samples_per_subject questions from this subject
+         if len(subject_indices) >= samples_per_subject:
+             # If we have enough samples, randomly select samples_per_subject
+             selected_indices = np.random.choice(subject_indices, samples_per_subject, replace=False)
+         else:
+             # If we don't have enough samples, take all available
+             selected_indices = subject_indices
+             print(f"Warning: Subject '{subject}' only has {len(subject_indices)} samples (requested {samples_per_subject})")
+
+         # Normalize to a plain list of ints (the else-branch yields a Python list, not an array)
+         balanced_indices.extend(np.asarray(selected_indices).tolist())
+
+     # Create balanced dataset
+     dataset = dataset.select(balanced_indices)
+
+     # Shuffle the final dataset
+     shuffled_indices = np.random.permutation(len(dataset))
+     dataset = dataset.select(shuffled_indices)
+
+     correct = 0
+     total = 0
+
+     subject_scores = {}
+
+     print(f"Evaluating {model_name} on {len(dataset)} balanced MMLU samples...")
+
+     for sample in tqdm(dataset):
+         subject = sample['subject']
+         question = sample['question']
+         choices = sample['choices']
+         answer = sample['answer']  # Index of correct answer
+
+         # Evaluate sample
+         is_correct = evaluate_mmlu_sample(model, tokenizer, question, choices, answer)
+
+         # Track overall accuracy
+         if is_correct:
+             correct += 1
+         total += 1
+
+         # Track per-subject accuracy
+         if subject not in subject_scores:
+             subject_scores[subject] = {'correct': 0, 'total': 0}
+
+         subject_scores[subject]['total'] += 1
+         if is_correct:
+             subject_scores[subject]['correct'] += 1
+
+     # Calculate final scores
+     overall_accuracy = (correct / total) * 100
+
+     subject_accuracies = {}
+     for subject, scores in subject_scores.items():
+         subject_accuracies[subject] = (scores['correct'] / scores['total']) * 100
+
+     return overall_accuracy, subject_accuracies
+
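+ # End-to-end comparison: load the tokenizer, base model, and adapter, run the same balanced
+ # MMLU evaluation on both models, then assemble a Markdown report (overall scores, a
+ # per-subject table, summary statistics, and per-domain averages).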
+ def run_comparison(target_model, target_adapter, count):
+
+     progress = gr.Progress()
+
+     # count of questions per MMLU subject from form needs to be multiplied by number of subjects
+     count = int(count) * 57
+     print(f"Running evaluation on {count} MMLU questions...")
+
+     progress(0.01, desc="Loading models...")
+
+     print("Loading models...")
+     tokenizer, base_model, ft_model = load_models(target_model, target_adapter)
+
+     progress(0.1, desc="Evaluating base model...")
+
+     # Evaluate base model
+     print("Evaluating base model...")
+     base_accuracy, base_subjects = run_mmlu_evaluation(base_model, tokenizer, f"{target_model}", count)
+
+     progress(0.5, desc="Evaluating fine-tuned adapter...")
+
+     # Evaluate fine-tuned model
+     print("Evaluating fine-tuned adapter...")
+     ft_accuracy, ft_subjects = run_mmlu_evaluation(ft_model, tokenizer, f"{target_adapter}", count)
+
+     progress(1, desc="Evaluation complete.")
+
+     output = ''
+
+     # Print overall results
+
+     output += f"## flexMMLU EVALUATION RESULTS - OVERALL PERFORMANCE\n"
+     output += f"### Base Model Accuracy: {base_accuracy:.2f}%\n"
+     output += f"### Adapter Accuracy: {ft_accuracy:.2f}%\n"
+     output += f"### Performance Change: {ft_accuracy - base_accuracy:+.2f}%\n"
+     output += f"## SUBJECT-BY-SUBJECT PERFORMANCE COMPARISON\n"
+     output += f"| {'Subject':<35} | {'Base %':<10} | {'Adapter %':<15} | {'Change':<10} | {'Status'} |\n"
+     output += f"|---|---|---|---|---|\n"
+
+     # Get all subjects (union of both dictionaries in case there are differences)
+     all_subjects = sorted(set(base_subjects.keys()) | set(ft_subjects.keys()))
+
+     # Track statistics
+     improvements = 0
+     degradations = 0
+     no_changes = 0
+     total_change = 0
+
+     subject_changes = []
+
+     # Print each subject
+     for subject in all_subjects:
+         # Get scores (default to 0 if subject missing from either model)
+         base_score = base_subjects.get(subject, 0.0)
+         ft_score = ft_subjects.get(subject, 0.0)
+         change = ft_score - base_score
+
+         # Determine status
+         if change > 0:
+             status = "📈 IMPROVED"
+             improvements += 1
+         elif change < 0:
+             status = "📉 DECLINED"
+             degradations += 1
+         else:
+             status = "➡️ NO CHANGE"
+             no_changes += 1
+
+         # Format subject name (replace underscores, capitalize)
+         formatted_subject = subject.replace('_', ' ').title()
+
+         output += f"| {formatted_subject:<35} | {base_score:<10.2f} | {ft_score:<15.2f} | {change:<+10.2f} | {status} |\n"
+
+         subject_changes.append((formatted_subject, change))
+         total_change += change
+
+     # Print summary statistics
+     output += f"## SUMMARY STATISTICS\n"
+     output += f"### Total Subjects Evaluated: {len(all_subjects)}\n"
+     output += f"### Subjects Improved: {improvements} ({improvements/len(all_subjects)*100:.1f}%)\n"
+     output += f"### Subjects Declined: {degradations} ({degradations/len(all_subjects)*100:.1f}%)\n"
+     output += f"### Subjects Unchanged: {no_changes} ({no_changes/len(all_subjects)*100:.1f}%)\n"
+     output += f"### Average Change per Subject: {total_change/len(all_subjects):+.2f}%\n"
+
+     # Show top improvements and declines
+     subject_changes.sort(key=lambda x: x[1], reverse=True)
+
+     output += f"### 🏆 TOP 10 MOST IMPROVED SUBJECTS\n"
+     for i, (subject, change) in enumerate(subject_changes[:10]):
+         output += f"{i+1:2d}. {subject:<30} {change:+.2f}%\n"
+
+     output += f"### ⚠️ TOP 10 MOST DECLINED SUBJECTS\n"
+     for i, (subject, change) in enumerate(reversed(subject_changes[-10:])):  # most declined first
+         output += f"{i+1:2d}. {subject:<30} {change:+.2f}%\n"
+
+     # Categories analysis (if we can group subjects)
+     output += f"### 📚 ANALYSIS BY ACADEMIC DOMAIN\n"
+
+     # Group subjects by broad categories
+     stem_keywords = ['math', 'physics', 'chemistry', 'biology', 'computer', 'engineering', 'statistics']
+     humanities_keywords = ['history', 'philosophy', 'literature', 'art', 'religion', 'culture']
+     social_keywords = ['psychology', 'sociology', 'economics', 'political', 'law', 'business']
+
+     stem_changes = []
+     humanities_changes = []
+     social_changes = []
+     other_changes = []
+
+     for subject, change in subject_changes:
+         subject_lower = subject.lower()
+         if any(keyword in subject_lower for keyword in stem_keywords):
+             stem_changes.append(change)
+         elif any(keyword in subject_lower for keyword in humanities_keywords):
+             humanities_changes.append(change)
+         elif any(keyword in subject_lower for keyword in social_keywords):
+             social_changes.append(change)
+         else:
+             other_changes.append(change)
+
+     # Print category averages
+     if stem_changes:
+         output += f"🔬 STEM Subjects: {np.mean(stem_changes):+.2f}% avg change ({len(stem_changes)} subjects)\n\n"
+     if humanities_changes:
+         output += f"📖 Humanities: {np.mean(humanities_changes):+.2f}% avg change ({len(humanities_changes)} subjects)\n\n"
+     if social_changes:
+         output += f"👥 Social Sciences: {np.mean(social_changes):+.2f}% avg change ({len(social_changes)} subjects)\n\n"
+     if other_changes:
+         output += f"📋 Other: {np.mean(other_changes):+.2f}% avg change ({len(other_changes)} subjects)\n\n"
+
+     return output
+
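+ # The adapter's base model is read from its adapter_config.json on the Hub, so the user only
+ # has to supply the adapter repo id.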
+ def get_base_model(adapter_repo):
+     """
+     Get base model for adapter via hub
+     """
+     try:
+         # Download adapter config from hub
+         config_path = hf_hub_download(repo_id=adapter_repo, filename="adapter_config.json")
+
+         with open(config_path, 'r') as f:
+             adapter_config = json.load(f)
+
+         base_model = adapter_config.get('base_model_name_or_path', '')
+
+         if base_model:
+             return base_model
+         else:
+             print(f"❌ Base model not found")
+             return "Base model not found"
+
+     except Exception as e:
+         print(f"Hub base model check failed: {e}")
+         return "Base model not found"
+
+ # Gradio interface
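+ # Small UI callbacks: these enable/disable the Evaluate button around a run, gate it on a
+ # successfully resolved base model, and mirror the report text into a Markdown view.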
+ def disable_button():
+     return gr.Button(interactive=False)
+
+ def enable_button():
+     return gr.Button(interactive=True)
+
+ def update_evaluate_button(base_model):
+     if base_model != "Base model not found":
+         return gr.Button(interactive=True)
+     else:
+         return gr.Button(interactive=False)
+
+ def process_text_to_markdown(text_input):
+     """
+     This function takes text from a Textbox and returns it for Markdown display.
+     """
+     return text_input
+
+ def hide_output_text(text_input):
+     return gr.Textbox(
+         visible=False
+     )
+
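+ # Maps the selected questions-per-subject count to the estimated margin-of-error range shown
+ # in the UI (see the "Speed vs. Accuracy" tab for the underlying table).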
+ def set_moe(count):
+     if count == "1":
+         return gr.Radio(value="8-12%")
+     elif count == "2":
+         return gr.Radio(value="6-9%")
+     elif count == "5":
+         return gr.Radio(value="4-6%")
+     elif count == "10":
+         return gr.Radio(value="3-4%")
+     elif count == "20":
+         return gr.Radio(value="2-3%")
+     elif count == "50":
+         return gr.Radio(value="1.5-2%")
+     elif count == "100":
+         return gr.Radio(value="1-1.5%")
+
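+ # UI layout: three tabs (the evaluation form, a speed-vs-accuracy explainer, and a flexMMLU
+ # methodology note). The Evaluate button stays disabled until a base model has been resolved
+ # from the adapter repo.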
+ with gr.Blocks() as interface:
+
+     # Add a title and description for the app.
+     gr.Markdown("# flexMMLU Fine-tune vs. Base Model Evaluator")
+     gr.Markdown("### A fast, flexible, fully subject-balanced MMLU benchmark comparison tool for fine-tuned adapters and their base models")
+     gr.Markdown("Compare your adapter to its base model head-to-head in just minutes using this accelerated [**MMLU (Massive Multitask Language Understanding)**](https://huggingface.co/datasets/cais/mmlu) benchmark evaluation.")
+     gr.Markdown("This app uses efficient, **subject-balanced random sampling** for a zero-shot **evaluation across all 57 academic and professional subjects** in the MMLU. It provides a relative performance comparison of an adapter and its base model, with a side-by-side breakdown of overall, subject-by-subject, and domain-by-domain scores.")
+     gr.Markdown("Whether you’re validating a new LoRA, demonstrating gains for a project, or just curious how much your fine-tuning really moved the needle, this tool gives you a **fast, standardized report and visualization** of the results.")
+     with gr.Accordion("Want it even faster?", open=False):
+         gr.Markdown("This Space runs on the free “CPU basic” (2 vCPU, 16 GB RAM) hardware option. You can duplicate this Space in your own account and configure it to use more powerful paid hardware for an even faster evaluation.")
+     with gr.Accordion("Using private or gated models?", open=False):
+         gr.Markdown("To use your own private models or public gated models, obtain the necessary gated-model access, duplicate this Space, and set your own HF_TOKEN environment variable/secret for the duplicated Space.")
+
+     with gr.Tab("Evaluation Tool"):
+         # Add input components for the user to set the adapter and base model.
+
+         with gr.Row():
+             adapter = gr.Textbox(
+                 label="Adapter (required)",
+                 show_label=True,
+                 max_lines=1,
+                 placeholder="adapter account/name (e.g., highlowmedia/gpt-oss-20b-lora-enlightened)",
+                 container=True,
+                 scale=10,
+             )
+             submit_adapter_button = gr.Button(
+                 "Get Base Model",
+                 scale=0,
+                 variant="secondary"
+             )
+
+         with gr.Row():
+             base_model = gr.Textbox(
+                 label="Base Model (auto-filled)",
+                 max_lines=1,
+                 interactive=False,
+                 container=True,
+             )
+
+         submit_adapter_button.click(
+             fn=get_base_model,
+             inputs=adapter,
+             outputs=base_model
+         )
+
+         questions_per_subject_count = gr.Radio(
+             ["1", "2", "5", "10", "20", "50", "100"],
+             value="20",
+             label="Questions Per MMLU Subject",
+             interactive=True,
+             info="The fewer questions per subject, the sooner the evaluation finishes; the trade-off is a correspondingly larger probable margin of error in the results. 20 to 50 questions per subject is recommended for a generally accepted margin of error of ~2%. (Read the 'Speed vs. Accuracy' section for more information.)"
+         )
+         margin_of_error_display = gr.Radio(
+             ["8-12%", "6-9%", "4-6%", "3-4%", "2-3%", "1.5-2%", "1-1.5%"],
+             value="2-3%",
+             label="Margin of Error (auto-filled)",
+             interactive=False
+         )
+         questions_per_subject_count.change(
+             fn=set_moe,
+             inputs=questions_per_subject_count,
+             outputs=margin_of_error_display
+         )
+
+         # Add a button to trigger the evaluation.
+         evaluate_button = gr.Button(
+             "Evaluate",
+             variant="primary",
+             interactive=False
+         )
+
+         output_text = gr.Textbox(
+             label="",
+             show_label=False,
+             lines=5,
+             interactive=False,
+             show_copy_button=True,
+         )
+
+         output_text_markdown = gr.Markdown("""
+
+         """)
+
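+         # Click flow: disable the button, run the comparison into the copyable textbox, mirror
+         # the result into the Markdown component, re-enable the button, then hide the raw
+         # textbox so only the rendered report remains visible.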
+         evaluate_button.click(
+             fn=disable_button,
+             inputs=None,
+             outputs=evaluate_button,
+             queue=False
+         ).then(
+             fn=run_comparison,
+             inputs=[
+                 base_model,
+                 adapter,
+                 questions_per_subject_count
+             ],
+             outputs=output_text
+         ).then(
+             fn=process_text_to_markdown,
+             inputs=output_text,
+             outputs=output_text_markdown
+         ).then(
+             fn=enable_button,
+             inputs=None,
+             outputs=evaluate_button,
+             queue=False
+         ).then(
+             fn=hide_output_text,
+             inputs=output_text,
+             outputs=output_text
+         )
+
+         # turn on evaluate button only after base model found
+         # base_model textbox can only be changed via get_base_model via submit_adapter_button
+         base_model.change(fn=update_evaluate_button, inputs=base_model, outputs=evaluate_button)
+
+     with gr.Tab("Speed vs. Accuracy"):
+         gr.Markdown("The balanced random approach of flexMMLU represents a practical middle ground, combining computational efficiency with methodological simplicity (in contrast with human-curated subset generation, for instance). While you can perhaps go too far in the quest for speed and computational efficiency, this tool lets you choose the degree of trade-off between speed and accuracy that suits your needs.")
+         gr.Markdown("A scaling formula for statistical error based on sample size informs the table of estimates below, which also mirrors the evaluation tool's configuration options. While the smallest subset size of 1 question per MMLU subject is likely not useful for your purposes (unless you want extreme computational efficiency that still handily outperforms guessing), note that there are diminishing returns on accuracy at 100 or more questions per subject.")
+         gr.Markdown("""
+ | Subset Size | Questions per Subject | Estimated Error Range |
+ |-------------|----------------------|----------------------|
+ | 57 | 1 | 8-12% |
+ | 114 | 2 | 6-9% |
+ | 285 | 5 | 4-6% |
+ | 570 | 10 | 3-4% |
+ | 1,140 | 20 | 2-3% |
+ | 2,850 | 50 | 1.5-2% |
+ | 5,700 | 100 | 1-1.5% |
+ """)
+     with gr.Tab("flexMMLU"):
+         gr.Markdown("The foundational LLM benchmark MMLU (Massive Multitask Language Understanding) contains 14,042 test questions on 57 different subjects across STEM, the humanities, the social sciences, and professional fields. The breadth of its subject matter makes it a still-powerful tool for zero-shot and few-shot evaluations of large language models without extensive task-specific training or instructions, but it is a large dataset, and using it can be computationally expensive and time-consuming.")
+         gr.Markdown("Inspired by tinyMMLU and its conceptual precursors, flexMMLU is implemented here as an MMLU accelerator with margin-of-error mitigation grounded in “stratified random sampling” - which is to say, random but balanced selection from “sub-scenarios” - MMLU subjects in our case. ([tinyBenchmarks: evaluating LLMs with fewer examples](https://arxiv.org/abs/2402.14992), Felipe Maia Polo, et al., 2024)")
+         gr.Markdown("Working with a subset of MMLU, regardless of how that subset is generated, reduces computational overhead but, of course, introduces added margin of error. Interestingly, the balanced random sampling of flexMMLU, which gives every subject equal representation in the subset, not only generally mitigates the added margin of error relative to other subsets of the full MMLU dataset, but may also give flexMMLU a performance advantage over MMLU due to the composition of MMLU itself - specifically, the uneven distribution of questions across domains in the full MMLU test dataset.")
+         gr.Markdown("All subjects in the full dataset are represented by at least 100 questions, but some subjects have up to 6 times as many questions as others. This imbalance is overcome in flexMMLU, and, accordingly, it would be fair to assume (though not scientifically verified) that flexMMLU makes inroads here with respect to closing the margin-of-error gap with MMLU. flexMMLU prevents bias from subject-representation imbalance and ultimately provides more meaningful subject-by-subject comparisons.")
+         gr.Markdown("*Errors have also been identified in MMLU over the years - an impetus for newer “massive” formulations like MMLU-Pro - and, not surprisingly, it has been reported that these errors are not evenly distributed across domains either. This last issue is not mitigated by the automated balanced subset generation from the original MMLU dataset used here.*")
+
+ if __name__ == "__main__":
+     interface.launch()