import random

import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification

# Models included within the interface
models = ["bert-base-uncased", "roberta-base"]
# Datasets included within the interface
datasets = ["No Dataset Finetuning",
"vedantgaur/GPTOutputs-MWP - AI Data Only",
"vedantgaur/GPTOutputs-MWP - Human Data Only",
"vedantgaur/GPTOutputs-MWP - Both AI and Human Data",
"dmitva/human_ai_generated_text - Both AI and Human Data"]
# Mapping of user-selected model and dataset to actual model name on Hugging Face
model_mapping = {
    ("bert-base-uncased", "No Dataset Finetuning"): "bert-base-uncased",
    ("bert-base-uncased", "vedantgaur/GPTOutputs-MWP - AI Data Only"): "SkwarczynskiP/bert-base-uncased-finetuned-vedantgaur-AI-generated",
    ("bert-base-uncased", "vedantgaur/GPTOutputs-MWP - Human Data Only"): "SkwarczynskiP/bert-base-uncased-finetuned-vedantgaur-human-generated",
    ("bert-base-uncased", "vedantgaur/GPTOutputs-MWP - Both AI and Human Data"): "SkwarczynskiP/bert-base-uncased-finetuned-vedantgaur-AI-and-human-generated",
    ("bert-base-uncased", "dmitva/human_ai_generated_text - Both AI and Human Data"): "SkwarczynskiP/bert-base-uncased-finetuned-dmitva-AI-and-human-generated",
    ("roberta-base", "No Dataset Finetuning"): "roberta-base",
    ("roberta-base", "vedantgaur/GPTOutputs-MWP - AI Data Only"): "SkwarczynskiP/roberta-base-finetuned-vedantgaur-AI-generated",
    ("roberta-base", "vedantgaur/GPTOutputs-MWP - Human Data Only"): "SkwarczynskiP/roberta-base-finetuned-vedantgaur-human-generated",
    ("roberta-base", "vedantgaur/GPTOutputs-MWP - Both AI and Human Data"): "SkwarczynskiP/roberta-base-finetuned-vedantgaur-AI-and-human-generated",
    ("roberta-base", "dmitva/human_ai_generated_text - Both AI and Human Data"): "SkwarczynskiP/roberta-base-finetuned-dmitva-AI-and-human-generated"
}
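
# Optional sanity check (an addition, not part of the original mapping logic): every
# model/dataset combination offered in the dropdowns should have an entry above, so the
# lookup in detect_ai_generated_text below never comes back empty.
assert all((m, d) in model_mapping for m in models for d in datasets)
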
# Example text included within the interface
exampleText = [
    "ex1",
    "ex2",
    "ex3",
    "ex4"
]
# Example models included within the interface
exampleModels = ["bert-base-uncased", "roberta-base"]
# Example datasets included within the interface
exampleDatasets = ["No Dataset Finetuning",
"vedantgaur/GPTOutputs-MWP - AI Data Only",
"vedantgaur/GPTOutputs-MWP - Human Data Only",
"vedantgaur/GPTOutputs-MWP - Both AI and Human Data",
"dmitva/human_ai_generated_text - Both AI and Human Data"]
# Build one example row (model, dataset, text) per example text, choosing each field at random
examples = [[random.choice(exampleModels), random.choice(exampleDatasets), random.choice(exampleText)] for _ in exampleText]


def detect_ai_generated_text(model: str, dataset: str, text: str) -> tuple:
    # Get the fine-tuned checkpoint name using the mapping
    finetuned_model = model_mapping.get((model, dataset))

    # Load the specific fine-tuned model (renamed to avoid shadowing the `model` argument)
    tokenizer = AutoTokenizer.from_pretrained(finetuned_model)
    finetuned = AutoModelForSequenceClassification.from_pretrained(finetuned_model)

    # Classify the input using the fine-tuned model
    classifier = pipeline('text-classification', model=finetuned, tokenizer=tokenizer)
    result = classifier(text)

    # Get the label and score
    label = "AI-generated" if result[0]['label'] == 'LABEL_1' else "Human-written"
    score = result[0]['score']

    # Create HTML for the colored bars
    ai_score = score if label == "AI-generated" else 1 - score
    human_score = 1 - ai_score
    ai_bar = f'<div style="background-color: red; width: {ai_score * 100}%; height: 20px;"></div>'
    human_bar = f'<div style="background-color: blue; width: {human_score * 100}%; height: 20px;"></div>'

    # gr.Interface expects one return value per output component, in order
    return f"{label} with confidence {score * 100:.2f}%", ai_bar, human_bar


interface = gr.Interface(
    fn=detect_ai_generated_text,
    inputs=[
        gr.Dropdown(choices=models, label="Model"),
        gr.Dropdown(choices=datasets, label="Dataset"),
        gr.Textbox(lines=5, label="Input Text")
    ],
    outputs=[
        gr.Label(label="Output"),
        gr.HTML(label="AI-generated"),
        gr.HTML(label="Human-written")
    ],
    examples=examples,
    title="AI Generated Text Detection"
)
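
# Running this file directly starts a local Gradio server; passing share=True to launch()
# would additionally create a temporary public link (optional, not used here).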
if __name__ == "__main__":
    interface.launch()