import gradio as gr
from toxicity_model import (
check_toxicity,
is_custom_toxic,
check_sentiment,
paraphrase_text,
check_cyberbullying_with_hatebert,
is_person_or_pronoun
)
import mysql.connector
# MySQL connection settings
db_config = {
'host': 'cyberbullying.cbo2y20cex64.ap-southeast-2.rds.amazonaws.com',
'user': 'root',
'password': 'Beauisagoodboy',
'database': 'cyberbullying'
}
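
# Optional override (a minimal sketch; the environment variable names DB_HOST,
# DB_USER, DB_PASSWORD and DB_NAME are assumptions, not something this app
# defines): read the credentials from the environment when available, so they
# need not be hardcoded. The values above remain the fallbacks, so behaviour
# is unchanged when the variables are not set.
import os

db_config = {
    'host': os.environ.get('DB_HOST', db_config['host']),
    'user': os.environ.get('DB_USER', db_config['user']),
    'password': os.environ.get('DB_PASSWORD', db_config['password']),
    'database': os.environ.get('DB_NAME', db_config['database'])
}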
def log_to_rds(text, is_bullying, tox_score, sentiment_score, suggested, person_or_pronoun, cyberbullying_flag, zone_db, likelihood, comment):
"""
    Logs the analyzed message data into the MySQL database.
"""
    connection = None
    cursor = None
    try:
        connection = mysql.connector.connect(**db_config)
        cursor = connection.cursor()
tox_score = float(tox_score)
sentiment_score = float(sentiment_score)
insert_query = """
INSERT INTO MESSAGE
(text, is_bullying, toxicity_score, sentiment_score, suggested_text, person_or_pronoun, cyberbullying_flag, zone, likelihood, comment)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
"""
cursor.execute(insert_query, (
text,
int(is_bullying),
tox_score,
sentiment_score,
suggested or "",
int(person_or_pronoun),
int(cyberbullying_flag),
zone_db,
likelihood,
comment
))
        connection.commit()
    except mysql.connector.Error as err:
        print(f"Database error: {err}")
    finally:
        # Always release the cursor and connection, even if the insert fails.
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
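
# Note: log_to_rds assumes a MESSAGE table with one column per inserted value.
# The exact schema is not part of this file; the column types below are guesses
# inferred from the values bound above, roughly:
#   text TEXT, is_bullying TINYINT, toxicity_score FLOAT, sentiment_score FLOAT,
#   suggested_text TEXT, person_or_pronoun TINYINT, cyberbullying_flag TINYINT,
#   zone VARCHAR(20), likelihood VARCHAR(100), comment TEXT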
def analyze(text):
"""
    Performs cyberbullying analysis on the text and returns the same outputs as
    the FastAPI endpoint: zone, likelihood, comment and suggested text.
"""
    # Run the individual checks: toxicity, sentiment, the custom toxicity check,
    # the person/pronoun check and the HateBERT cyberbullying score.
    tox_score = round(check_toxicity(text), 2)
    sent_score = round(check_sentiment(text), 2)
    custom_flag = is_custom_toxic(text)
    person_or_pronoun = is_person_or_pronoun(text)
    cyberbullying_flag = check_cyberbullying_with_hatebert(text)
    # Treat the message as bullying if the custom check fires or either model
    # score crosses its threshold.
    is_bullying = custom_flag or tox_score >= 0.4 or cyberbullying_flag >= 0.5
suggested_text = paraphrase_text(text) if is_bullying else None
suggested_text = str(suggested_text) if suggested_text else ""
# Determine severity zone
if custom_flag or tox_score >= 0.7 or cyberbullying_flag >= 0.7:
zone = '🔴 Red Zone'
zone_db = 'Red Zone'
likelihood = 'Very high likelihood of bullying'
comment = 'Warning: this message looks very harmful. It may seriously hurt someone. This may cross the line into cyberbullying.'
elif tox_score >= 0.4 or cyberbullying_flag >= 0.4:
zone = '🟠 Orange Zone'
zone_db = 'Orange Zone'
likelihood = 'High likelihood of bullying'
comment = 'This could hurt someone’s feelings — try to say it in a more positive way.'
elif tox_score >= 0.2 or cyberbullying_flag >= 0.2:
zone = '🟡 Yellow Zone'
zone_db = 'Yellow Zone'
likelihood = 'Medium likelihood of bullying'
comment = 'Looks safe, but context matters — make sure it won’t hurt anyone.'
else:
zone = '🟢 Green Zone'
zone_db = 'Green Zone'
likelihood = 'Low likelihood of bullying'
comment = 'Looks good! No red flags here. Nice one!'
# Log to database
log_to_rds(text, is_bullying, tox_score, sent_score, suggested_text, person_or_pronoun, cyberbullying_flag, zone_db, likelihood, comment)
return zone, likelihood, comment, suggested_text
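
# Example (illustrative only; the sample sentence is made up): analyze() can be
# called directly, without the Gradio UI, and returns a
# (zone, likelihood, comment, suggested_text) tuple:
#
#     zone, likelihood, comment, suggestion = analyze("Great job on the project!")
#     print(zone)  # one of the four zone strings, e.g. '🟢 Green Zone'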
# ----------------------
# Gradio interface
# ----------------------
iface = gr.Interface(
fn=analyze,
inputs=gr.Textbox(label="Enter text", lines=3),
outputs=[
gr.Textbox(label="Zone"),
gr.Textbox(label="Likelihood"),
gr.Textbox(label="Comment"),
gr.Textbox(label="Suggested Text")
],
title="Cyberbullying Detection Tool"
)
if __name__ == "__main__":
iface.launch(share=True)