Removed tutorial.mov to meet Hugging Face size limits
- .gitignore +7 -0
- app.py +59 -76
- visuals/layout.py +14 -16
- visuals/score_card.py +30 -63
.gitignore
ADDED
@@ -0,0 +1,7 @@
+__pycache__/
+*.pyc
+.vscode/
+*.mov
+*.mp4
+.DS_Store
+.env
app.py
CHANGED
@@ -1,13 +1,11 @@
 import gradio as gr
 import nltk
-import os
 import pandas as pd
-from nltk.tokenize import TreebankWordTokenizer
 from sklearn.metrics.pairwise import cosine_similarity
 from sentence_transformers import SentenceTransformer
-import graphviz
 from typing import Tuple, Optional
-
+
+from visuals.score_card import render_score_card
 from visuals.layout import (
     render_page_header,
     render_core_reference,
@@ -15,48 +13,46 @@ from visuals.layout import (
     render_pipeline_graph,
     render_pipeline_warning,
     render_strategy_alignment,
-)
+)
 
-#
+# Download tokenizer if not already available
 try:
     nltk.download("punkt", quiet=True)
 except Exception as e:
     print(f"Error downloading NLTK data: {e}")
 
-# Load
+# Load embedding model
 model = SentenceTransformer("all-MiniLM-L6-v2")
 
+# Global state to store uploaded DataFrame
+uploaded_df = {}
+
 
+# --- Core Metrics ---
 def calculate_ttr(text: str) -> float:
-    """Calculates Type-Token Ratio (TTR) for lexical diversity."""
-    if not text:
-        return 0.0
     words = text.split()
     unique_words = set(words)
     return len(unique_words) / len(words) if words else 0.0
 
 
 def calculate_similarity(text1: str, text2: str) -> float:
-    """Calculates cosine similarity between two texts."""
     embeddings = model.encode([text1, text2])
     return cosine_similarity([embeddings[0]], [embeddings[1]])[0][0]
 
 
 def calculate_mad_score(ttr: float, similarity: float) -> float:
-    """Calculates the MAD score."""
     return 0.3 * (1 - ttr) + 0.7 * similarity
 
 
 def get_risk_level(mad_score: float) -> str:
-    """Determines the risk level based on the MAD score."""
     if mad_score > 0.7:
         return "High"
     elif 0.4 <= mad_score <= 0.7:
         return "Medium"
-
-    return "Low"
+    return "Low"
 
 
+# --- Data Processing ---
 def process_data(file_obj, model_col: str, train_col: str, data_source: str) -> Tuple[
     Optional[str],
     Optional[bytes],
@@ -66,12 +62,10 @@ def process_data(file_obj, model_col: str, train_col: str, data_source: str) ->
     Optional[float],
     Optional[float],
 ]:
-    """Processes the uploaded file and calculates metrics."""
     try:
         if not file_obj:
             return "Error: No file uploaded.", None, None, None, None, None, None
 
-        global uploaded_df
         df = uploaded_df.get("data")
         if df is None:
             return "Error: File not yet processed.", None, None, None, None, None, None
@@ -110,25 +104,22 @@ def process_data(file_obj, model_col: str, train_col: str, data_source: str) ->
             ttr_train,
             similarity,
         )
+
     except Exception as e:
         return f"An error occurred: {str(e)}", None, None, None, None, None, None
 
 
-#
-uploaded_df = {}
-
-
+# --- Helpers ---
 def update_dropdowns(file_obj) -> Tuple[gr.Dropdown, gr.Dropdown, str]:
     global uploaded_df
     if not file_obj:
-        uploaded_df["data"] = None
+        uploaded_df["data"] = None
        return (
             gr.update(choices=[], value=None),
             gr.update(choices=[], value=None),
             "No file uploaded.",
         )
 
-    # Read the file and extract columns
     try:
         file_name = getattr(file_obj, "name", "")
         if file_name.endswith(".csv"):
@@ -143,12 +134,10 @@ def update_dropdowns(file_obj) -> Tuple[gr.Dropdown, gr.Dropdown, str]:
         )
 
         uploaded_df["data"] = df
-        columns = df.columns.tolist()
         preview = df.head().to_markdown(index=False, numalign="left", stralign="left")
-
         return (
-            gr.update(choices=columns, value=None),
-            gr.update(choices=columns, value=None),
+            gr.update(choices=df.columns.tolist(), value=None),
+            gr.update(choices=df.columns.tolist(), value=None),
             preview,
         )
 
@@ -161,22 +150,22 @@ def update_dropdowns(file_obj) -> Tuple[gr.Dropdown, gr.Dropdown, str]:
 
 
 def clear_all_fields():
-
-    uploaded_df.clear()  # Clear stored DataFrame
+    uploaded_df.clear()
     return (
-        None,
-        gr.update(choices=[], value=None),
-        gr.update(choices=[], value=None),
-        "",
-        "",
-        "",
-        None,
-        None,
-        None,
-        render_pipeline_graph("Synthetic Generated Data"),
+        None,
+        gr.update(choices=[], value=None),
+        gr.update(choices=[], value=None),
+        "",
+        "",
+        "",
+        None,
+        None,
+        None,
+        render_pipeline_graph("Synthetic Generated Data"),
     )
 
 
+# --- Interface ---
 def main_interface():
     css = """
     .gradio-container {
@@ -194,38 +183,39 @@ def main_interface():
     with gr.Blocks(css=css, title="MADGuard AI Explorer") as interface:
         gr.HTML(render_page_header())
 
+        gr.HTML(
+            """
+            <div style="text-align:center; margin-bottom: 20px;">
+                <h3>📽️ How to Use MADGuard AI Explorer</h3>
+                <iframe width="560" height="315" src="https://www.youtube.com/embed/qjMwvaBXQeY"
+                title="Tutorial Video" frameborder="0"
+                allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"
+                allowfullscreen></iframe>
+            </div>
+            """
+        )
+
         gr.Markdown(
             """
-            > 🧠 **MADGuard AI Explorer** helps
+            > 🧠 **MADGuard AI Explorer** helps simulate feedback loops in RAG pipelines and detect **Model Autophagy Disorder (MAD)**.
 
             - Compare **real vs. synthetic input effects**
            - Visualize the data flow
             - Upload your `.csv` or `.json` data
-            - Get
+            - Get diagnostics based on lexical diversity and semantic similarity
             """
         )
 
         with gr.Accordion("📚 Research Reference", open=False):
             gr.HTML(render_core_reference())
-        gr.HTML(
-            """
-            <div style="display: flex; flex-direction: column; align-items: center; margin-bottom: 20px;">
-                <h3 style="text-align: center;">📽️ How to Use MADGuard AI Explorer</h3>
-                <iframe width="720" height="405"
-                src="https://www.youtube.com/embed/qjMwvaBXQeY"
-                title="MADGuard AI Tutorial" frameborder="0"
-                allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"
-                allowfullscreen></iframe>
-            </div>
-            """
-        )
 
         gr.Markdown("## 1. Pipeline Simulation")
         data_source, description = render_pipeline(default="Synthetic Generated Data")
-
         gr.HTML(description)
+
         pipeline_output = gr.Image(type="filepath", label="Pipeline Graph")
         warning_output = gr.HTML()
+
         data_source.change(
             fn=render_pipeline_warning, inputs=data_source, outputs=warning_output
         )
@@ -233,9 +223,7 @@ def main_interface():
             fn=render_pipeline_graph, inputs=data_source, outputs=pipeline_output
         )
         interface.load(
-            fn=render_pipeline_graph,
-            inputs=[data_source],
-            outputs=[pipeline_output],
+            fn=render_pipeline_graph, inputs=[data_source], outputs=[pipeline_output]
         )
 
         gr.Markdown("## 2. Upload CSV or JSON File")
@@ -246,28 +234,23 @@ def main_interface():
 
         gr.Markdown(
             """
-
-
-
-
-            """
+            📝 **Note:**
+            - **Model Output Column**: Model-generated responses/completions.
+            - **Training Data Column**: Candidate future training input.
+            """
         )
 
         with gr.Row():
             model_col_input = gr.Dropdown(
-                choices=[],
-                value=None,
-                label="Select column for model output",
-                interactive=True,
+                choices=[], label="Select column for model output", interactive=True
             )
             train_col_input = gr.Dropdown(
                 choices=[],
-                value=None,
                 label="Select column for future training data",
                 interactive=True,
             )
-        file_preview = gr.Markdown(label="📄 File Preview")
 
+        file_preview = gr.Markdown(label="📄 File Preview")
         output_markdown = gr.Markdown(label="🔍 Evaluation Summary")
 
         with gr.Accordion("📋 Research-Based Strategy Alignment", open=False):
@@ -290,7 +273,7 @@ def main_interface():
         )
 
         def process_and_generate(
-            file_obj, model_col_val
+            file_obj, model_col_val, train_col_val, data_source_val
         ):
             error, graph, preview, markdown, ttr_out, ttr_tr, sim = process_data(
                 file_obj, model_col_val, train_col_val, data_source_val
@@ -319,6 +302,7 @@ def main_interface():
             ttr_train_metric,
             similarity_metric,
         ]
+
        clear_btn.click(
             fn=clear_all_fields,
             inputs=[],
@@ -344,19 +328,18 @@ def main_interface():
         gr.Markdown("---")
         gr.Markdown(
             """
-
-
-
-
+            **Pro version coming soon:**
+            - Bulk CSV uploads
+            - Trend visualizations
+            - One-click export of audit reports
 
-
-
+            [📩 Join the waitlist](https://docs.google.com/forms/d/e/1FAIpQLSfAPPC_Gm7DQElQSWGSnoB6T5hMxb_rXSu48OC8E6TNGZuKgQ/viewform?usp=sharing&ouid=118007615320536574300)
+            """
         )
 
     return interface
 
 
-# Launch the Gradio interface
 if __name__ == "__main__":
     interface = main_interface()
     interface.launch(server_name="0.0.0.0", server_port=7860)
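For reference, a minimal sketch (not part of the commit) of how the refactored helpers in app.py compose into a MAD verdict. The sample strings are invented; the model name, weights, and thresholds come from the diff above:

```python
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity

model = SentenceTransformer("all-MiniLM-L6-v2")  # same checkpoint as app.py


def calculate_ttr(text: str) -> float:
    # Type-Token Ratio: unique words / total words (lexical diversity)
    words = text.split()
    return len(set(words)) / len(words) if words else 0.0


def calculate_similarity(text1: str, text2: str) -> float:
    # Cosine similarity between the two sentence embeddings
    embeddings = model.encode([text1, text2])
    return cosine_similarity([embeddings[0]], [embeddings[1]])[0][0]


# Invented example inputs
model_output = "the cat sat on the mat and the cat sat again"
training_text = "the cat sat on the mat and the cat sat once more"

ttr = calculate_ttr(model_output)
similarity = calculate_similarity(model_output, training_text)
mad_score = 0.3 * (1 - ttr) + 0.7 * similarity  # calculate_mad_score weights
risk = "High" if mad_score > 0.7 else "Medium" if mad_score >= 0.4 else "Low"
print(f"TTR={ttr:.2f}, similarity={similarity:.2f}, MAD={mad_score:.2f}, risk={risk}")
```

The 0.3/0.7 weighting penalizes low lexical diversity and high output-to-training similarity, the two signals the app treats as evidence of a self-consuming loop.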
visuals/layout.py
CHANGED
@@ -3,11 +3,10 @@ import graphviz
 import pandas as pd
 from typing import Tuple
 import tempfile
-import os
 
 
 def render_page_header() -> str:
-    """
+    """Render the page header for the app."""
     return """
     <div style="text-align: center; margin-top: 1rem;">
         <h1 style="margin-bottom: 0.25rem;">MADGuard AI Explorer</h1>
@@ -17,21 +16,20 @@ def render_page_header() -> str:
 
 
 def render_core_reference() -> str:
-    """
+    """Render the reference to the research paper inspiring the app."""
     return """
     <details>
     <summary>📚 arXiv:2307.01850</summary>
     <p>
     <b>Self-consuming LLMs: How and When Models Feed Themselves</b> – <i>Santurkar et al., 2023</i><br>
-
+    Introduces <b>Model Autophagy Disorder (MAD)</b>—where LLMs degrade from consuming their own outputs.<br><br>
 
-
+    Detection strategies implemented in MADGuard include:
     - Lexical diversity analysis
-    -
-    -
-
-    <i>"MADGuard AI Explorer is inspired by key findings from this research, aligning with early warnings and pipeline hygiene practices recommended in their work."</i>
+    - Semantic similarity scoring
+    - MAD risk score warnings
 
+    <i>MADGuard aligns with practices recommended in this paper.</i><br>
     📎 <a href="https://arxiv.org/pdf/2307.01850" target="_blank">Read Full Paper (arXiv)</a>
     </p>
     </details>
@@ -39,20 +37,21 @@ def render_core_reference() -> str:
 
 
 def render_pipeline(default: str = "Real User Inputs") -> Tuple[gr.Radio, str]:
-    """
+    """Render the source selector for RAG simulation."""
     with gr.Row():
         source = gr.Radio(
             ["Real User Inputs", "Synthetic Generated Data"],
             label="Select input source:",
             value=default,
-            # Removed 'help' parameter to avoid TypeError with Gradio 4.44.0
         )
-    description = """
+    description = """
+    <center>ℹ️ <b>Real User Inputs</b> = human queries. <b>Synthetic Generated Data</b> = model-generated content reused in training.</center>
+    """
     return source, description
 
 
 def render_pipeline_graph(source: str) -> str:
-    """
+    """Generate and return the file path of a RAG pipeline graph visualization."""
     dot = graphviz.Digraph(
         graph_attr={"rankdir": "LR", "bgcolor": "transparent"},
         node_attr={
@@ -72,7 +71,6 @@ def render_pipeline_graph(source: str) -> str:
         "Retraining Set" if source == "Synthetic Generated Data" else "Embedding Store",
     )
 
-    # Save to a temporary file and return the file path
     with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_file:
         output_path = tmp_file.name
     dot.render(filename=output_path, format="png", cleanup=True)
@@ -80,7 +78,7 @@ def render_pipeline_graph(source: str) -> str:
 
 
 def render_pipeline_warning(source: str) -> str:
-    """
+    """Return warning text based on selected data source."""
     if source == "Synthetic Generated Data":
         return "<div style='color:red; font-weight:bold;'>⚠️ High loop risk: Model may be learning from its own outputs.</div>"
     else:
@@ -88,7 +86,7 @@ def render_pipeline_warning(source: str) -> str:
 
 
 def render_strategy_alignment() -> str:
-    """
+    """Return an HTML table comparing MADGuard features with research strategies."""
     data = {
         "Strategy from Research": [
             "Lexical redundancy (e.g., n-gram overlap)",
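A sketch of the temp-file pattern render_pipeline_graph relies on, with invented node names (the real graph wires up nodes such as "Embedding Store" and "Retraining Set"). One detail worth noting: graphviz's render() appends the format extension, so the PNG lands at output_path + ".png"; the function's return statement sits outside the hunk shown above.

```python
import tempfile

import graphviz

# Illustrative pipeline; node names are placeholders
dot = graphviz.Digraph(graph_attr={"rankdir": "LR"})
dot.edge("User Query", "Retriever")
dot.edge("Retriever", "LLM Output")
dot.edge("LLM Output", "Retraining Set")  # the feedback edge MADGuard warns about

# Reserve a unique filename, then let graphviz write the source and the PNG
with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_file:
    output_path = tmp_file.name
dot.render(filename=output_path, format="png", cleanup=True)

print(output_path + ".png")  # path a gr.Image(type="filepath") can display
```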
visuals/score_card.py
CHANGED
@@ -1,74 +1,41 @@
-import
-
-
-
-def
-
-
-
-
-    risk_level: str,
-) -> Tuple[str, str, str]:
-    """Renders the evaluation summary and score details."""
-
-    color = {"High": "#e57373", "Medium": "#ffb74d", "Low": "#81c784"}[risk_level]
-
-
-
-
-        This suggests a **strong feedback loop**, meaning the model is likely to reinforce existing patterns rather than learning new behaviors.
-        **What You Can Do**:
-        - Replace synthetic data with more **diverse real user input** - Use **paraphrasing techniques** before reuse
-        - Add **augmentation or filtering** before retraining
-        """,
-        "Medium": """
-        🟠 **Moderate Risk Identified** There is some overlap between your outputs and training content.
-        Your model may partially reinforce existing phrasing patterns.
-        **Suggestions**:
-        - Mix synthetic and real inputs carefully
-        - Monitor training logs for semantic redundancy
-        """,
-        "Low": """
-        🟢 **Low Risk Score** Your model output and training data appear **diverse** and distinct.
-        This is a good sign that your model is learning from **new and varied sources**.
-        **You’re on the right track!**
-        """,
-    }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        <div style='width: {mad_score * 100:.0f}%; background: {color}; height: 100%; border-radius: 10px;'></div>
-    </div>
-    """
-
-
-
-        <summary>📊 Score Breakdown</summary>
-    TTR Component (0.3 × (1 - TTR)): {(1 - ttr_output) * 0.3:.2f}
-    Similarity Component (0.7 × Cosine): {similarity * 0.7:.2f}
-    MAD Score = 0.3 × (1 - TTR) + 0.7 × Semantic Similarity
-    </details>
-    """
-
-
-    <details>
-    <summary>🔍 What does this score mean?</summary>
-    {risk_explanations[risk_level]}
-    </details>
-    """
-
-    return summary, details, explanation
+import pandas as pd
+import streamlit as st
+
+
+def calculate_type_token_ratio(text: str) -> float:
+    """Calculate the Type-Token Ratio (TTR) for a given text."""
+    tokens = text.split()
+    unique_tokens = set(tokens)
+    return len(unique_tokens) / len(tokens) if tokens else 0
+
+
+def generate_score_card(data: pd.DataFrame, text_col: str, mode: str) -> pd.DataFrame:
+    """
+    Generate lexical metrics and risk scores for the uploaded dataset.
+
+    Args:
+        data: The input DataFrame.
+        text_col: Name of the column containing text data.
+        mode: Data source type ('Real User Inputs' or 'Synthetic Generated Data').
+
+    Returns:
+        DataFrame with added metrics.
+    """
+    result = data.copy()
+
+    if text_col not in result.columns:
+        st.error(f"Selected column '{text_col}' not found in uploaded data.")
+        return pd.DataFrame()
+
+    result["TTR"] = result[text_col].apply(calculate_type_token_ratio)
+    result["Length"] = result[text_col].apply(lambda x: len(x.split()))
+
+    # Risk scoring based on thresholds
+    result["Risk"] = result["TTR"].apply(
+        lambda ttr: "High" if ttr < 0.3 else "Medium" if ttr < 0.5 else "Low"
+    )
+
+    # Add a column to show data source
+    result["Data Source"] = mode
+
+    return result
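A hypothetical usage sketch of the rewritten module; the DataFrame and column name are invented. Note that app.py imports render_score_card while the new module defines generate_score_card, so the sketch assumes generate_score_card is the intended entry point:

```python
import pandas as pd

from visuals.score_card import generate_score_card

# Invented sample data: one diverse row, one highly repetitive row
df = pd.DataFrame(
    {"output": ["the quick brown fox jumps over a lazy dog", "words words words words"]}
)

scored = generate_score_card(df, text_col="output", mode="Synthetic Generated Data")
print(scored[["TTR", "Length", "Risk", "Data Source"]])
```

The repetitive row gets a low TTR and therefore a "High" risk label under the module's thresholds (TTR below 0.3 is High, below 0.5 is Medium, otherwise Low).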