import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import numpy as np
import tempfile

# Load the model and tokenizer from the Hugging Face Hub
model_name = "SamanthaStorm/abuse-pattern-detector-v2"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model.eval()  # inference only: disable dropout and other training-time behavior
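
# Optional (a sketch, left commented out): move the model to GPU when one is
# available. The rest of this script assumes CPU tensors, so scores would need
# a .cpu() before .numpy() if this is enabled.
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model.to(device)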

# Our model outputs 17 labels:
# - First 14 are abuse pattern categories
# - Last 3 are Danger Assessment cues
TOTAL_LABELS = 17
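
# Sanity check (a minimal sketch, relying on the checkpoint's config reporting
# its classification head size via num_labels, as Hugging Face configs do):
if model.config.num_labels != TOTAL_LABELS:
    raise ValueError(f"Expected {TOTAL_LABELS} labels, got {model.config.num_labels}")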

def analyze_messages(text):
    input_text = text.strip()
    if not input_text:
        return "Please enter a message for analysis.", None
    
    # Tokenize input text
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    
    # Model returns one logit per label; squeeze the batch dimension -> shape [17]
    logits = outputs.logits.squeeze()
    # Sigmoid, not softmax: in a multi-label setup each label is scored independently
    scores = torch.sigmoid(logits).numpy()
    
    # For the first 14 labels (abuse patterns), count how many exceed threshold 0.5
    abuse_pattern_scores = scores[:14]
    concerning_pattern_count = int(np.sum(abuse_pattern_scores > 0.5))
    
    # For the last 3 labels (Danger Assessment cues), count how many exceed threshold 0.5
    danger_scores = scores[14:17]
    danger_flag_count = int(np.sum(danger_scores > 0.5))
    
    # Map danger flag count to Danger Assessment Score
    if danger_flag_count >= 2:
        danger_assessment = "High"
    elif danger_flag_count == 1:
        danger_assessment = "Moderate"
    else:
        danger_assessment = "Low"
    
    # Customize resource links based on Danger Assessment Score (with additional niche support)
    if danger_assessment == "High":
        resources = (
            "**Immediate Help:** If you are in immediate danger, please call 911.\n\n"
            "**Crisis Support:** National DV Hotline – Safety Planning: [thehotline.org/plan-for-safety](https://www.thehotline.org/plan-for-safety/)\n"
            "**Legal Assistance:** WomensLaw – Legal Help for Survivors: [womenslaw.org](https://www.womenslaw.org/)\n"
            "**Specialized Support:** For LGBTQ+, immigrants, and neurodivergent survivors, please consult local specialized services or visit RAINN: [rainn.org](https://www.rainn.org/)"
        )
    elif danger_assessment == "Moderate":
        resources = (
            "**Safety Planning:** The Hotline – What Is Emotional Abuse?: [thehotline.org/resources](https://www.thehotline.org/resources/what-is-emotional-abuse/)\n"
            "**Relationship Health:** One Love Foundation – Digital Relationship Health: [joinonelove.org](https://www.joinonelove.org/)\n"
            "**Support Chat:** National Domestic Violence Hotline Chat: [thehotline.org](https://www.thehotline.org/)\n"
            "**Specialized Groups:** Look for support groups tailored for LGBTQ+, immigrant, and neurodivergent communities."
        )
    else:  # Low risk
        resources = (
            "**Educational Resources:** Love Is Respect – Healthy Relationships: [loveisrespect.org](https://www.loveisrespect.org/)\n"
            "**Therapy Finder:** Psychology Today – Find a Therapist: [psychologytoday.com](https://www.psychologytoday.com/us/therapists)\n"
            "**Relationship Tools:** Relate – Relationship Health Tools: [relate.org.uk](https://www.relate.org.uk/)\n"
            "**Community Support:** Consider community-based and online support groups, especially those focused on LGBTQ+, immigrant, and neurodivergent survivors."
        )
    
    # Prepare the output result with both scores
    result_md = (
        f"**Abuse Pattern Count:** {concerning_pattern_count}\n\n"
        f"**Danger Assessment Score:** {danger_assessment}\n\n"
        f"**Support Resources:**\n{resources}"
    )
    
    # Save the result to a temporary text file for download.
    # delete=False keeps the file on disk after the context exits so Gradio can serve it.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".txt", mode="w") as f:
        f.write(result_md)
        report_path = f.name
    
    return result_md, report_path
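
# Quick smoke test (sketch): the analyzer can be called directly, bypassing the UI.
# The example message below is purely illustrative.
#   summary, report_path = analyze_messages("You never listen. Everything is your fault.")
#   print(summary)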

# Build the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Abuse Pattern Detector - Risk Analysis")
    gr.Markdown("Enter one or more messages (separated by newlines) for analysis.")
    
    text_input = gr.Textbox(label="Input Messages", lines=10, placeholder="Type your message(s) here...")
    result_output = gr.Markdown(label="Analysis Result")
    download_output = gr.File(label="Download Report (.txt)")
    
    analyze_btn = gr.Button("Analyze")
    analyze_btn.click(analyze_messages, inputs=text_input, outputs=[result_output, download_output])
    # Submitting the textbox triggers the same analysis
    text_input.submit(analyze_messages, inputs=text_input, outputs=[result_output, download_output])
    
if __name__ == "__main__":
    demo.launch()