import gradio as gr
import torch
from transformers import RobertaForSequenceClassification, RobertaTokenizer
import numpy as np
# Load the fine-tuned model and tokenizer (trust_remote_code in case the repo ships custom code)
model_name = "SamanthaStorm/abuse-pattern-detector-v2"
model = RobertaForSequenceClassification.from_pretrained(model_name, trust_remote_code=True)
tokenizer = RobertaTokenizer.from_pretrained(model_name, trust_remote_code=True)
model.eval()  # inference only: disable dropout for deterministic scores
# Define labels (17 total)
LABELS = [
    "gaslighting", "mockery", "dismissiveness", "control",
    "guilt_tripping", "apology_baiting", "blame_shifting", "projection",
    "contradictory_statements", "manipulation", "deflection", "insults",
    "obscure_formal", "recovery_phase", "suicidal_threat", "physical_threat",
    "extreme_control",
]
# Custom decision threshold for each label (these should match the original tuned settings)
THRESHOLDS = {
    "gaslighting": 0.25,
    "mockery": 0.15,
    "dismissiveness": 0.30,
    "control": 0.43,
    "guilt_tripping": 0.19,
    "apology_baiting": 0.45,
    "blame_shifting": 0.23,
    "projection": 0.50,
    "contradictory_statements": 0.25,
    "manipulation": 0.25,
    "deflection": 0.30,
    "insults": 0.34,
    "obscure_formal": 0.25,
    "recovery_phase": 0.25,
    "suicidal_threat": 0.45,
    "physical_threat": 0.31,
    "extreme_control": 0.36,
    "non_abusive": 0.40,  # not in LABELS, so the scoring logic below never uses it
}
# Define label groups using slicing (first 14: abuse patterns, last 3: danger cues)
PATTERN_LABELS = LABELS[:14]
DANGER_LABELS = LABELS[14:17]
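# Sanity check: the two slices above should partition all 17 labels exactly.
assert len(PATTERN_LABELS) == 14 and len(DANGER_LABELS) == 3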
def calculate_abuse_level(scores, thresholds):
    """Average the scores that cleared their thresholds, as a 0-100 percentage."""
    triggered_scores = [score for label, score in zip(LABELS, scores) if score > thresholds[label]]
    if not triggered_scores:
        return 0.0
    return round(np.mean(triggered_scores) * 100, 2)
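# Example: if only "mockery" (0.20 > its 0.15 threshold) and "insults"
# (0.40 > 0.34) clear their thresholds, the abuse level is
# mean(0.20, 0.40) * 100 = 30.0.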
def interpret_abuse_level(score):
    """Map a 0-100 abuse level onto a human-readable risk band."""
    if score > 80:
        return "Extreme / High Risk"
    elif score > 60:
        return "Severe / Harmful Pattern Present"
    elif score > 40:
        return "Likely Abuse"
    elif score > 20:
        return "Mild Concern"
    else:
        return "Very Low / Likely Safe"
def analyze_messages(input_text):
    input_text = input_text.strip()
    if not input_text:
        # Single return value to match the interface's single Textbox output
        return "Please enter a message for analysis."
    # Tokenize the input and run the model; sigmoid (not softmax) because this
    # is multi-label classification, so each label gets an independent probability
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    scores = torch.sigmoid(outputs.logits.squeeze(0)).numpy()
    # Count triggered abuse patterns and danger flags against their thresholds
    pattern_count = sum(score > THRESHOLDS[label] for label, score in zip(PATTERN_LABELS, scores[:14]))  # informational; not shown in the UI
    danger_flag_count = sum(score > THRESHOLDS[label] for label, score in zip(DANGER_LABELS, scores[14:17]))
    # Build a formatted raw-score display (not currently included in the UI output)
    score_lines = [
        f"{label:25}: {score:.3f}" for label, score in zip(PATTERN_LABELS + DANGER_LABELS, scores)
    ]
    raw_score_output = "\n".join(score_lines)
    # Calculate the overall abuse level and interpret it
    abuse_level = calculate_abuse_level(scores, THRESHOLDS)
    abuse_description = interpret_abuse_level(abuse_level)
    # Choose resources based on the number of danger cues detected
    if danger_flag_count >= 2:
        resources = "Immediate assistance recommended. Please seek professional help or contact emergency services."
    else:
        resources = "For more information on abuse patterns, consider reaching out to support groups or professional counselors."
    # Get the two highest-scoring abuse patterns
    scored_patterns = list(zip(PATTERN_LABELS, scores[:14]))
    top_patterns = sorted(scored_patterns, key=lambda x: x[1], reverse=True)[:2]
    top_patterns_str = "\n".join(f"• {label.replace('_', ' ').title()}" for label, _ in top_patterns)
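    # Example top_patterns_str for a message scoring highest on gaslighting
    # and blame_shifting:
    #     • Gaslighting
    #     • Blame Shifting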
    # Format the final result
    result = (
        f"Abuse Risk Score: {abuse_level}% – {abuse_description}\n"
        "This message contains signs of emotionally harmful communication that may indicate abusive patterns.\n\n"
        f"Most Likely Patterns:\n{top_patterns_str}\n\n"
        f"⚠️ Critical Danger Flags Detected: {danger_flag_count} of 3\n"
        "The Danger Assessment is a validated tool that helps identify serious risk in intimate partner violence. "
        "It flags communication patterns associated with increased risk of severe harm. "
        "For more info, consider reaching out to support groups or professionals.\n\n"
        f"Resources: {resources}"
    )
    # Return a single text summary for the one Textbox output
    return result
# Gradio interface using the current component syntax
iface = gr.Interface(
    fn=analyze_messages,
    inputs=gr.Textbox(lines=10, placeholder="Enter message here..."),
    outputs=gr.Textbox(label="Analysis Result"),
    title="Abuse Pattern Detector",
)
if __name__ == "__main__":
    iface.launch()
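# Quick smoke test without launching the UI (assumes this file is saved as
# app.py, the Spaces default; the sample message is made up):
#     python -c "from app import analyze_messages; print(analyze_messages('You never listen. This is all your fault.'))"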