import gradio as gr
from transformers import BertTokenizer, BertForSequenceClassification
import torch

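# Load the spam-detection model and its tokenizer from the Hugging Face Hub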
model_name = "AventIQ-AI/bert-spam-detection"
tokenizer = BertTokenizer.from_pretrained(model_name)
model = BertForSequenceClassification.from_pretrained(model_name)

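# Run on a GPU when one is available and switch the model to evaluation mode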
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()


def predict_spam(text):
    """Predicts whether a given text is spam or not."""
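    # Tokenize the input and move the tensors to the same device as the model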
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    inputs = {key: value.to(device) for key, value in inputs.items()}
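    # Run the forward pass without tracking gradients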
    with torch.no_grad():
        outputs = model(**inputs)
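    # Convert the logits to probabilities and pick the most likely class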
    logits = outputs.logits
    probabilities = torch.softmax(logits, dim=1)
    prediction = torch.argmax(probabilities, dim=1).item()
    confidence = probabilities[0][prediction].item()
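    # Map the predicted class index to a human-readable label and report the confidence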
    label_map = {0: "Not Spam", 1: "Spam"}
    result = f"Prediction: {label_map[prediction]}\nConfidence: {confidence:.2f}"
    return result

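# Build the Gradio web interface around the prediction function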
iface = gr.Interface(
    fn=predict_spam,
    inputs=gr.Textbox(label="📧 Input Text", placeholder="Enter the email or message content here...", lines=5),
    outputs=gr.Textbox(label="📊 Spam Detection Result"),
    title="🛡️ BERT-Based Spam Detector",
    description="Enter the content of an email or message to determine whether it's Spam or Not Spam.",
    examples=[
        ["Congratulations! You've won a $1,000,000 lottery. Click here to claim your prize."],
        ["Hey, are we still meeting for lunch tomorrow?"],
        ["URGENT: Your account has been compromised. Reset your password immediately by clicking this link."],
        ["Don't miss out on our exclusive offer! Buy one, get one free on all items."],
        ["Can you send me the report by end of the day? Thanks!"]
    ],
    theme="compact",
    allow_flagging="never"
)
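# Launch the app when the script is run directly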
if __name__ == "__main__":
    iface.launch()