import base64
import io
import time

import pandas as pd
import streamlit as st
import torch
from PIL import Image, ImageDraw, ImageFont
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BertForSequenceClassification,
    BertTokenizerFast,
)

# Streamlit page configuration (must be the first Streamlit command in the script)
st.set_page_config(
    page_title="SMS Spam Guard",
    page_icon="🛡️",
    layout="wide",
    initial_sidebar_state="expanded",
)


def create_logo():
    """Render the SafeTalk shield logo and return it as a base64-encoded PNG."""
    # Transparent canvas for the logo
    img = Image.new('RGBA', (200, 200), color=(0, 0, 0, 0))
    draw = ImageDraw.Draw(img)

    # Dark-blue shield shape
    shield_color = (30, 58, 138)
    points = [(100, 10), (180, 50), (160, 170), (100, 190), (40, 170), (20, 50)]
    draw.polygon(points, fill=shield_color)

    # Fall back to PIL's built-in font when Arial is not available
    try:
        font = ImageFont.truetype("arial.ttf", 80)
    except IOError:
        font = ImageFont.load_default()

    draw.text((70, 60), "ST", fill=(255, 255, 255), font=font)

    # Encode the image so it can be embedded inline via a data: URI
    buffered = io.BytesIO()
    img.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode()


# Global CSS used by the HTML snippets rendered with unsafe_allow_html below
st.markdown("""
<style>
    .main-header {
        font-size: 2.5rem !important;
        color: #1E3A8A;
        font-weight: 700;
        margin-bottom: 0.5rem;
    }
    .sub-header {
        font-size: 1.1rem;
        color: #6B7280;
        margin-bottom: 2rem;
    }
    .highlight {
        background-color: #F3F4F6;
        padding: 1.5rem;
        border-radius: 0.5rem;
        margin-bottom: 1rem;
    }
    .result-card {
        background-color: #F0F9FF;
        padding: 1.5rem;
        border-radius: 0.5rem;
        border-left: 5px solid #3B82F6;
        margin-bottom: 1rem;
    }
    .spam-alert {
        background-color: #FEF2F2;
        border-left: 5px solid #EF4444;
    }
    .ham-alert {
        background-color: #ECFDF5;
        border-left: 5px solid #10B981;
    }
    .footer {
        text-align: center;
        margin-top: 3rem;
        font-size: 0.8rem;
        color: #9CA3AF;
    }
    .metrics-container {
        display: flex;
        justify-content: space-between;
        margin-top: 1rem;
    }
    .metric-item {
        text-align: center;
        padding: 1rem;
        background-color: #F9FAFB;
        border-radius: 0.5rem;
        box-shadow: 0 1px 3px rgba(0,0,0,0.1);
    }
    .language-tag {
        display: inline-block;
        padding: 0.25rem 0.5rem;
        background-color: #E0E7FF;
        color: #4F46E5;
        border-radius: 9999px;
        font-size: 0.8rem;
        font-weight: 500;
        margin-right: 0.5rem;
    }
</style>
""", unsafe_allow_html=True)


@st.cache_resource
def load_language_model():
    """Load the language detection model"""
    model_name = "papluca/xlm-roberta-base-language-detection"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    return tokenizer, model


@st.cache_resource
def load_spam_model():
    """Load the fine-tuned BERT spam detection model"""
    model_path = "chjivan/final"
    tokenizer = BertTokenizerFast.from_pretrained(model_path)
    model = BertForSequenceClassification.from_pretrained(model_path)
    return tokenizer, model
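
# Both loaders above are wrapped in @st.cache_resource, so the Hugging Face
# weights are downloaded and instantiated once per process and then reused
# across Streamlit reruns instead of being reloaded on every interaction.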


def detect_language(text, tokenizer, model):
    """Detect the language of the input text"""
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)

    logits = outputs.logits
    probabilities = torch.softmax(logits, dim=1)[0]

    predicted_class_id = torch.argmax(probabilities).item()
    predicted_language = model.config.id2label[predicted_class_id]
    confidence = probabilities[predicted_class_id].item()

    # Top-3 candidate languages with their probabilities
    top_3 = torch.topk(probabilities, 3)
    top_3_langs = [
        (model.config.id2label[idx], prob)
        for idx, prob in zip(top_3.indices.tolist(), top_3.values.tolist())
    ]

    return predicted_language, confidence, top_3_langs


def classify_spam(text, tokenizer, model):
    """Classify the input text as spam or ham"""
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=128)
    with torch.no_grad():
        outputs = model(**inputs)

    logits = outputs.logits
    probabilities = torch.softmax(logits, dim=1)[0]

    predicted_class_id = torch.argmax(probabilities).item()
    confidence = probabilities[predicted_class_id].item()

    is_spam = predicted_class_id == 1
    return is_spam, confidence
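
# Note: treating class id 1 as "spam" is an assumption about the label order of
# the chjivan/final checkpoint; if the checkpoint is retrained or replaced,
# confirm the mapping against model.config.id2label before relying on is_spam.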


# Build the inline logo and load both models up front (cached after the first run)
logo_base64 = create_logo()
logo_html = f'<img src="data:image/png;base64,{logo_base64}" style="height:150px;">'

with st.spinner("Loading models... This may take a moment."):
    lang_tokenizer, lang_model = load_language_model()
    spam_tokenizer, spam_model = load_spam_model()

# Page header: logo plus title and tagline
col1, col2 = st.columns([1, 5])
with col1:
    st.markdown(logo_html, unsafe_allow_html=True)
with col2:
    st.markdown('<h1 class="main-header">SMS Spam Guard</h1>', unsafe_allow_html=True)
    st.markdown('<p class="sub-header">Intelligent SMS spam filtering assistant by SafeTalk Communications Ltd.</p>', unsafe_allow_html=True)


# Sidebar: company blurb plus sample messages that pre-fill the input box
with st.sidebar:
    st.markdown(logo_html, unsafe_allow_html=True)
    st.markdown("### About SafeTalk")
    st.markdown("SafeTalk Communications Ltd. provides intelligent communication security solutions to protect users from spam and fraudulent messages.")
    st.markdown("#### Our Technology")
    st.markdown("- ✅ Advanced AI-powered spam detection")
    st.markdown("- 🌐 Multi-language support")
    st.markdown("- 🔒 Secure and private processing")
    st.markdown("- ⚡ Real-time analysis")

    st.markdown("---")
    st.markdown("### Sample Messages")

    if st.button("Sample Spam (English)"):
        st.session_state.sms_input = "URGENT: You have won a $1,000 Walmart gift card. Go to http://bit.ly/claim-prize to claim now before it expires!"

    if st.button("Sample Legitimate (English)"):
        st.session_state.sms_input = "Your Amazon package will be delivered today. Thanks for ordering from Amazon!"

    if st.button("Sample Message (French)"):
        st.session_state.sms_input = "Bonjour! Votre réservation pour le restaurant est confirmée pour ce soir à 20h. À bientôt!"

    if st.button("Sample Message (Spanish)"):
        st.session_state.sms_input = "Hola, tu cita médica está programada para mañana a las 10:00. Por favor llega 15 minutos antes."


# Main input area
st.markdown('<div class="highlight">', unsafe_allow_html=True)
sms_input = st.text_area(
    "Enter the SMS message to analyze:",
    height=100,
    key="sms_input",
    help="Enter the SMS message you want to analyze for spam",
)
analyze_button = st.button("📱 Analyze Message", use_container_width=True)
st.markdown('</div>', unsafe_allow_html=True)
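
# The sidebar sample buttons write to st.session_state["sms_input"], which the
# text_area above picks up through its key on the next rerun.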


# Run the pipeline only when the button is pressed and there is text to analyze
if analyze_button and sms_input:
    with st.spinner("Analyzing message..."):
        # Step 1: language identification
        lang_start_time = time.time()
        lang_code, lang_confidence, top_langs = detect_language(sms_input, lang_tokenizer, lang_model)
        lang_time = time.time() - lang_start_time

        # Human-readable names for the language codes the detector can emit
        lang_names = {
            "ar": "Arabic",
            "bg": "Bulgarian",
            "de": "German",
            "el": "Greek",
            "en": "English",
            "es": "Spanish",
            "fr": "French",
            "hi": "Hindi",
            "it": "Italian",
            "ja": "Japanese",
            "nl": "Dutch",
            "pl": "Polish",
            "pt": "Portuguese",
            "ru": "Russian",
            "sw": "Swahili",
            "th": "Thai",
            "tr": "Turkish",
            "ur": "Urdu",
            "vi": "Vietnamese",
            "zh": "Chinese",
        }
        lang_name = lang_names.get(lang_code, lang_code)

        # Step 2: spam classification
        spam_start_time = time.time()
        is_spam, spam_confidence = classify_spam(sms_input, spam_tokenizer, spam_model)
        spam_time = time.time() - spam_start_time

    # Display results side by side
    st.markdown("### Analysis Results")
    col1, col2 = st.columns(2)

    with col1:
        st.markdown("#### 📊 Language Detection")
        st.markdown('<div class="result-card">', unsafe_allow_html=True)
        st.markdown(f'<span class="language-tag">{lang_name}</span> Detected with {lang_confidence:.1%} confidence', unsafe_allow_html=True)

        st.markdown("##### Top language probabilities:")
        for code, prob in top_langs:
            lang_full = lang_names.get(code, code)
            st.markdown(f"- {lang_full}: {prob:.1%}")

        st.markdown(f"⏱️ Processing time: {lang_time:.3f} seconds")
        st.markdown('</div>', unsafe_allow_html=True)

    with col2:
        st.markdown("#### 🔍 Spam Detection")
        if is_spam:
            st.markdown('<div class="result-card spam-alert">', unsafe_allow_html=True)
            st.markdown(f"⚠️ **SPAM DETECTED** with {spam_confidence:.1%} confidence")
            st.markdown("This message appears to be spam and potentially harmful.")
        else:
            st.markdown('<div class="result-card ham-alert">', unsafe_allow_html=True)
            st.markdown(f"✅ **LEGITIMATE MESSAGE** with {spam_confidence:.1%} confidence")
            st.markdown("This message appears to be legitimate.")

        st.markdown(f"⏱️ Processing time: {spam_time:.3f} seconds")
        st.markdown('</div>', unsafe_allow_html=True)

    # Summary and recommended action
    st.markdown("### 📋 Summary & Recommendations")
    if is_spam:
        st.warning("📵 **Recommended Action**: This message should be blocked or moved to the spam folder.")
        st.markdown("""
**Why this is likely spam:**
- Contains suspicious language patterns
- May include urgent calls to action
- Could contain unsolicited offers
""")
    else:
        st.success("✅ **Recommended Action**: This message can be delivered to the inbox.")

    # Confidence chart: language-detection confidence vs. spam probability
    st.markdown("### 📈 Confidence Visualization")
    chart_data = pd.DataFrame({
        'Task': ['Language Detection', 'Spam Classification'],
        'Confidence': [lang_confidence, spam_confidence if is_spam else 1 - spam_confidence],
    })
    st.bar_chart(chart_data.set_index('Task'))
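
    # The second bar always plots the model's spam probability: spam_confidence
    # when the message is classified as spam, and 1 - spam_confidence otherwise,
    # so both outcomes are shown on the same scale.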


# Footer (always rendered)
st.markdown('<div class="footer">', unsafe_allow_html=True)
st.markdown("© 2023 SafeTalk Communications Ltd. | www.safetalk.com")
st.markdown("SMS Spam Guard is an intelligent message filtering solution to protect users from unwanted communications.")
st.markdown('</div>', unsafe_allow_html=True)