Tether / app.py
import gradio as gr
import numpy as np
import torch
from transformers import RobertaForSequenceClassification, RobertaTokenizer, pipeline

# Load the default sentiment-analysis pipeline
sentiment_analyzer = pipeline("sentiment-analysis")

# Load the abuse-pattern classifier and its tokenizer
model_name = "SamanthaStorm/abuse-pattern-detector-v2"
model = RobertaForSequenceClassification.from_pretrained(model_name, trust_remote_code=True)
tokenizer = RobertaTokenizer.from_pretrained(model_name, trust_remote_code=True)

# Define labels (18 total)
LABELS = [
    "gaslighting", "mockery", "dismissiveness", "control",
    "guilt_tripping", "apology_baiting", "blame_shifting", "projection",
    "contradictory_statements", "manipulation", "deflection", "insults",
    "obscure_formal", "recovery_phase", "non_abusive",
    "suicidal_threat", "physical_threat", "extreme_control",
]
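# Scores from the model are mapped to these labels by position, so the order above must
# match the order of the model's output logits.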

# Custom thresholds for each label
THRESHOLDS = {
    "gaslighting": 0.25,
    "mockery": 0.15,
    "dismissiveness": 0.30,
    "control": 0.43,
    "guilt_tripping": 0.19,
    "apology_baiting": 0.45,
    "blame_shifting": 0.23,
    "projection": 0.50,
    "contradictory_statements": 0.25,
    "manipulation": 0.25,
    "deflection": 0.30,
    "insults": 0.34,
    "obscure_formal": 0.25,
    "recovery_phase": 0.25,
    "non_abusive": 0.70,
    "suicidal_threat": 0.45,
    "physical_threat": 0.20,
    "extreme_control": 0.36,
}
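
# The first 15 labels are behavioral patterns; the last 3 are high-risk danger flags.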
PATTERN_LABELS = LABELS[:15]
DANGER_LABELS = LABELS[15:18]

EXPLANATIONS = {
    "gaslighting": "Gaslighting involves making someone question their own reality or perceptions, often causing them to feel confused or insecure.",
    "blame_shifting": "Blame-shifting is when one person redirects the responsibility for an issue onto someone else, avoiding accountability.",
    "projection": "Projection involves accusing the victim of behaviors or characteristics that the abuser themselves exhibit.",
    "dismissiveness": "Dismissiveness is the act of belittling or disregarding another person's thoughts, feelings, or experiences.",
    "mockery": "Mockery involves ridiculing or making fun of someone in a hurtful way, often with the intent to humiliate them.",
    "recovery_phase": "Recovery phase refers to dismissing or invalidating someone’s process of emotional healing, or ignoring their need for support.",
    "insults": "Insults are derogatory remarks aimed at degrading or humiliating someone, often targeting their personal traits or character.",
    "apology_baiting": "Apology-baiting is when the abuser manipulates the victim into apologizing for something the abuser caused or did wrong.",
    "deflection": "Deflection is the act of avoiding responsibility or shifting focus away from one's own actions, often to avoid accountability.",
    "control": "Control tactics are behaviors that restrict or limit someone's autonomy, often involving domination, manipulation, or coercion.",
    "extreme_control": "Extreme control involves excessive manipulation or domination over someone’s actions, decisions, or behaviors.",
    "physical_threat": "Physical threats involve any indication or direct mention of harm to someone’s physical well-being, often used to intimidate or control.",
    "suicidal_threat": "Suicidal threats are statements made to manipulate or control someone by making them feel responsible for the abuser’s well-being.",
    "guilt_tripping": "Guilt-tripping involves making someone feel guilty or responsible for things they didn’t do, often to manipulate their behavior.",
    "emotional_manipulation": "Emotional manipulation is using guilt, fear, or emotional dependency to control another person’s thoughts, feelings, or actions.",
    "manipulation": "Manipulation refers to using deceptive tactics to control or influence someone’s emotions, decisions, or behavior to serve the manipulator’s own interests.",
    "non_abusive": "Non-abusive language is communication that is respectful, empathetic, and free of harmful behaviors or manipulation.",
}
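# Labels with no entry above (e.g. "obscure_formal", "contradictory_statements") fall back to the
# generic "No explanation available." message via EXPLANATIONS.get() in analyze_messages.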

def calculate_abuse_level(scores, thresholds):
    """Average the scores of all triggered labels and return the result as a percentage."""
    triggered_scores = [score for label, score in zip(LABELS, scores) if score > thresholds[label]]
    if not triggered_scores:
        return 0.0
    return round(np.mean(triggered_scores) * 100, 2)

def interpret_abuse_level(score):
    """Map a 0-100 abuse score to a human-readable risk band."""
    if score > 80:
        return "Extreme / High Risk"
    elif score > 60:
        return "Severe / Harmful Pattern Present"
    elif score > 40:
        return "Likely Abuse"
    elif score > 20:
        return "Mild Concern"
    else:
        return "Very Low / Likely Safe"

def analyze_messages(input_text):
    input_text = input_text.strip()
    if not input_text:
        return "Please enter a message for analysis."

    # Run sentiment analysis on the raw message
    sentiment = sentiment_analyzer(input_text)[0]
    sentiment_label = sentiment['label']
    sentiment_score = sentiment['score']

    # Lower every threshold by 20% when the overall sentiment is negative
    adjusted_thresholds = THRESHOLDS.copy()
    if sentiment_label == "NEGATIVE":
        adjusted_thresholds = {key: val * 0.8 for key, val in THRESHOLDS.items()}

    # Score the message against all 18 labels
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    scores = torch.sigmoid(outputs.logits.squeeze(0)).numpy()

    # Count triggered pattern labels and danger flags (pattern_count is not surfaced in the report yet)
    pattern_count = sum(score > adjusted_thresholds[label] for label, score in zip(PATTERN_LABELS, scores[:15]))
    danger_flag_count = sum(score > adjusted_thresholds[label] for label, score in zip(DANGER_LABELS, scores[15:18]))

    # Short-circuit when the message is confidently classified as non-abusive
    non_abusive_score = scores[LABELS.index('non_abusive')]
    if non_abusive_score > adjusted_thresholds['non_abusive']:
        return "This message is classified as non-abusive."

    # The overall abuse level is computed against the base (unadjusted) thresholds
    abuse_level = calculate_abuse_level(scores, THRESHOLDS)
    abuse_description = interpret_abuse_level(abuse_level)

    # Escalate the resource message when two or more danger flags fire
    if danger_flag_count >= 2:
        resources = "Immediate assistance recommended. Please seek professional help or contact emergency services."
    else:
        resources = "For more information on abuse patterns, consider reaching out to support groups or professional counselors."

    # Explain the two highest-scoring pattern labels
    scored_patterns = [(label, score) for label, score in zip(PATTERN_LABELS, scores[:15])]
    top_patterns = sorted(scored_patterns, key=lambda x: x[1], reverse=True)[:2]
    top_pattern_explanations = "\n".join([
        f"\u2022 {label.replace('_', ' ').title()}: {EXPLANATIONS.get(label, 'No explanation available.')}"
        for label, _ in top_patterns
    ])

    result = (
        f"Abuse Risk Score: {abuse_level}% – {abuse_description}\n\n"
        f"Most Likely Patterns:\n{top_pattern_explanations}\n\n"
        f"⚠️ Critical Danger Flags Detected: {danger_flag_count} of 3\n"
        "The Danger Assessment is a validated tool that helps identify serious risk in intimate partner violence. "
        "It flags communication patterns associated with increased risk of severe harm. "
        "For more info, consider reaching out to support groups or professionals.\n\n"
        f"Resources: {resources}\n\n"
        f"Sentiment: {sentiment_label} (Confidence: {sentiment_score*100:.2f}%)"
    )
    return result

iface = gr.Interface(
    fn=analyze_messages,
    inputs=gr.Textbox(lines=10, placeholder="Enter message here..."),
    outputs=[
        gr.Textbox(label="Analysis Result"),
    ],
    title="Abuse Pattern Detector",
)

if __name__ == "__main__":
    iface.launch()
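
# Minimal smoke test without the Gradio UI (a sketch; assumes the model weights named above can be
# downloaded from the Hugging Face Hub). The sample message is illustrative only; uncomment to run:
# print(analyze_messages("You always twist my words. This is all your fault, not mine."))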