import gradio as gr
import torch
from transformers import RobertaForSequenceClassification, RobertaTokenizer
import numpy as np
import tempfile
# Load model and tokenizer
model_name = "SamanthaStorm/abuse-pattern-detector-v2"
model = RobertaForSequenceClassification.from_pretrained(model_name)
tokenizer = RobertaTokenizer.from_pretrained(model_name)
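# Note: the weights and tokenizer are fetched from the Hugging Face Hub on first run
# and cached locally; from_pretrained() returns the model already in eval mode, so no
# explicit model.eval() call is needed before inference.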
# Define the final label order your model used
LABELS = [
    "gaslighting", "mockery", "dismissiveness", "control",
    "guilt_tripping", "apology_baiting", "blame_shifting", "projection",
    "contradictory_statements", "manipulation", "deflection", "insults",
    "obscure_formal", "recovery_phase", "suicidal_threat", "physical_threat",
    "extreme_control"
]
# Our model outputs 17 labels:
# - First 14 are abuse pattern categories
# - Last 3 are Danger Assessment cues
TOTAL_LABELS = 17
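# In analyze_messages() below, scores[:14] covers the pattern categories and
# scores[14:] covers the danger cues (suicidal_threat, physical_threat, extreme_control).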
def analyze_messages(input_text):
    input_text = input_text.strip()
    if not input_text:
        # Return a value for both outputs (markdown result + download file)
        return "Please enter a message for analysis.", None
    # Tokenize
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    # Squeeze out batch dimension: shape should be [17]
    logits = outputs.logits.squeeze(0)
    # Convert logits to probabilities
    scores = torch.sigmoid(logits).numpy()
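    # Sigmoid (not softmax) is used because this is a multi-label setup:
    # each label gets an independent probability, so several patterns can be
    # flagged for the same message.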
    # Debug printing (remove once you're confident everything works)
    print("Scores:", scores)
    # First 14 = pattern scores
    pattern_scores = scores[:14]
    pattern_count = int(np.sum(pattern_scores > 0.5))
    # Last 3 = danger cues
    danger_scores = scores[14:]
    danger_flag_count = int(np.sum(danger_scores > 0.5))
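    # 0.5 is a uniform per-label cutoff for both pattern and danger scores;
    # per-label thresholds could be tuned on validation data if needed.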
# (Optional) Print label-by-label for debugging
for i, s in enumerate(scores):
print(LABELS[i], "=", round(s, 3))
    # Map danger flag count to Danger Assessment Score
    if danger_flag_count >= 2:
        danger_assessment = "High"
    elif danger_flag_count == 1:
        danger_assessment = "Moderate"
    else:
        danger_assessment = "Low"
    # Customize resource links based on the Danger Assessment Score, including specialized support options
    if danger_assessment == "High":
        resources = (
            "**Immediate Help:** If you are in immediate danger, please call 911.\n\n"
            "**Crisis Support:** National DV Hotline – Safety Planning: [thehotline.org/plan-for-safety](https://www.thehotline.org/plan-for-safety/)\n"
            "**Legal Assistance:** WomensLaw – Legal Help for Survivors: [womenslaw.org](https://www.womenslaw.org/)\n"
            "**Specialized Support:** For LGBTQ+, immigrants, and neurodivergent survivors, please consult local specialized services or visit RAINN: [rainn.org](https://www.rainn.org/)"
        )
    elif danger_assessment == "Moderate":
        resources = (
            "**Safety Planning:** The Hotline – What Is Emotional Abuse?: [thehotline.org/resources](https://www.thehotline.org/resources/what-is-emotional-abuse/)\n"
            "**Relationship Health:** One Love Foundation – Digital Relationship Health: [joinonelove.org](https://www.joinonelove.org/)\n"
            "**Support Chat:** National Domestic Violence Hotline Chat: [thehotline.org](https://www.thehotline.org/)\n"
            "**Specialized Groups:** Look for support groups tailored for LGBTQ+, immigrant, and neurodivergent communities."
        )
    else:  # Low risk
        resources = (
            "**Educational Resources:** Love Is Respect – Healthy Relationships: [loveisrespect.org](https://www.loveisrespect.org/)\n"
            "**Therapy Finder:** Psychology Today – Find a Therapist: [psychologytoday.com](https://www.psychologytoday.com/us/therapists)\n"
            "**Relationship Tools:** Relate – Relationship Health Tools: [relate.org.uk](https://www.relate.org.uk/)\n"
            "**Community Support:** Consider community-based and online support groups, especially those focused on LGBTQ+, immigrant, and neurodivergent survivors."
        )
    # Prepare the output result with both scores
    result_md = (
        f"**Abuse Pattern Count:** {pattern_count}\n\n"
        f"**Danger Assessment Score:** {danger_assessment}\n\n"
        f"**Support Resources:**\n{resources}"
    )
    # Save the result to a temporary text file for download.
    # delete=False keeps the file on disk after the handle closes so the
    # gr.File component can serve it.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".txt", mode="w") as f:
        f.write(result_md)
        report_path = f.name
    return result_md, report_path
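# Quick local sanity check (illustrative input; assumes the model download succeeds):
#   summary_md, report_file = analyze_messages("You never listen. This is all your fault.")
#   print(summary_md)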
# Build the Gradio interface
with gr.Blocks() as demo:
gr.Markdown("# Abuse Pattern Detector - Risk Analysis")
gr.Markdown("Enter one or more messages (separated by newlines) for analysis.")
text_input = gr.Textbox(label="Input Messages", lines=10, placeholder="Type your message(s) here...")
result_output = gr.Markdown(label="Analysis Result")
download_output = gr.File(label="Download Report (.txt)")
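    # Pressing Enter in the textbox and clicking the button both run the same
    # analysis and update the same two outputs (markdown summary + report file).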
    analyze_btn = gr.Button("Analyze")
    text_input.submit(analyze_messages, inputs=text_input, outputs=[result_output, download_output])
    analyze_btn.click(analyze_messages, inputs=text_input, outputs=[result_output, download_output])
if __name__ == "__main__":
    demo.launch()