# Hugging Face Spaces app: AI-generated text detection demo.
# (Removed page-scrape residue: "Spaces:" header and "Runtime error" status lines.)
import gradio as gr
import random
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification

# Base models selectable in the interface
models = ["bert-base-uncased", "roberta-base"]

# Fine-tuning dataset options selectable in the interface
datasets = [
    "No Dataset Finetuning",
    "vedantgaur/GPTOutputs-MWP - AI Data Only",
    "vedantgaur/GPTOutputs-MWP - Human Data Only",
    "vedantgaur/GPTOutputs-MWP - Both AI and Human Data",
    "dmitva/human_ai_generated_text - Both AI and Human Data",
]

# Maps a (base model, dataset) selection to the actual model id on the
# Hugging Face Hub. The "No Dataset Finetuning" rows fall back to the
# base checkpoints themselves.
model_mapping = {
    ("bert-base-uncased", "No Dataset Finetuning"): "bert-base-uncased",
    ("bert-base-uncased", "vedantgaur/GPTOutputs-MWP - AI Data Only"): "SkwarczynskiP/bert-base-uncased-finetuned-vedantgaur-AI-generated",
    ("bert-base-uncased", "vedantgaur/GPTOutputs-MWP - Human Data Only"): "SkwarczynskiP/bert-base-uncased-finetuned-vedantgaur-human-generated",
    ("bert-base-uncased", "vedantgaur/GPTOutputs-MWP - Both AI and Human Data"): "SkwarczynskiP/bert-base-uncased-finetuned-vedantgaur-AI-and-human-generated",
    ("bert-base-uncased", "dmitva/human_ai_generated_text - Both AI and Human Data"): "SkwarczynskiP/bert-base-uncased-finetuned-dmitva-AI-and-human-generated",
    ("roberta-base", "No Dataset Finetuning"): "roberta-base",
    ("roberta-base", "vedantgaur/GPTOutputs-MWP - AI Data Only"): "SkwarczynskiP/roberta-base-finetuned-vedantgaur-AI-generated",
    ("roberta-base", "vedantgaur/GPTOutputs-MWP - Human Data Only"): "SkwarczynskiP/roberta-base-finetuned-vedantgaur-human-generated",
    ("roberta-base", "vedantgaur/GPTOutputs-MWP - Both AI and Human Data"): "SkwarczynskiP/roberta-base-finetuned-vedantgaur-AI-and-human-generated",
    ("roberta-base", "dmitva/human_ai_generated_text - Both AI and Human Data"): "SkwarczynskiP/roberta-base-finetuned-dmitva-AI-and-human-generated",
}

# Placeholder example texts shown in the interface (one inner list per example
# row, following Gradio's examples convention).
exampleText = [
    ["ex1"],
    ["ex2"],
    ["ex3"],
    ["ex4"],
]

# Kept as aliases for backward compatibility; they previously duplicated the
# `models`/`datasets` lists verbatim.
exampleModels = models
exampleDatasets = datasets

# One random (model, dataset, text) example row per placeholder text.
# NOTE: the text entry is unwrapped with [0] — random.choice(exampleText)
# returns an inner *list* (e.g. ["ex1"]), not the string the Textbox expects.
examples = [
    [random.choice(exampleModels), random.choice(exampleDatasets), random.choice(exampleText)[0]]
    for _ in exampleText
]
def detect_ai_generated_text(model: str, dataset: str, text: str) -> str:
    """Classify *text* as AI-generated or human-written.

    Args:
        model: Base model name selected in the UI (e.g. "bert-base-uncased").
        dataset: Fine-tuning dataset option selected in the UI.
        text: The input text to classify.

    Returns:
        A human-readable verdict with the classifier's confidence percentage,
        or an error message when the (model, dataset) pair has no mapping.
    """
    # Resolve the UI selection to a Hugging Face Hub checkpoint id.
    finetuned_model = model_mapping.get((model, dataset))
    if finetuned_model is None:
        # Guard against unmapped combinations instead of crashing inside
        # from_pretrained(None).
        return f"No fine-tuned model available for ({model}, {dataset})."

    # Load the specific fine-tuned checkpoint.
    # NOTE(review): this downloads/loads on every call; consider caching the
    # pipeline per checkpoint if latency matters.
    tokenizer = AutoTokenizer.from_pretrained(finetuned_model)
    # Renamed from `model` to avoid shadowing the function parameter.
    classifier_model = AutoModelForSequenceClassification.from_pretrained(finetuned_model)

    # Classify the input with the fine-tuned model.
    classifier = pipeline('text-classification', model=classifier_model, tokenizer=tokenizer)
    result = classifier(text)

    # LABEL_1 is assumed to be the "AI-generated" class — TODO confirm against
    # the training configuration of the fine-tuned checkpoints.
    label = "AI-generated" if result[0]['label'] == 'LABEL_1' else "Human-written"
    score = result[0]['score']
    return f"{label} with confidence {score * 100:.2f}%"
# Build the Gradio interface: model/dataset dropdowns plus a free-text input,
# wired to the classifier above.
interface = gr.Interface(
    fn=detect_ai_generated_text,
    inputs=[
        gr.Dropdown(choices=models, label="Model"),
        gr.Dropdown(choices=datasets, label="Dataset"),
        gr.Textbox(lines=5, label="Input Text"),
    ],
    outputs=gr.Textbox(label="Output"),
    examples=examples,
    title="AI Generated Text Detection",
)

# Launch the app only when executed as a script.
if __name__ == "__main__":
    interface.launch()