SamanthaStorm committed
Commit f1948f2 · verified
1 Parent(s): eb528b9

Create app.py

Files changed (1)
  1. app.py +99 -0
app.py ADDED
@@ -0,0 +1,99 @@
+ import gradio as gr
+ import torch
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ import numpy as np
+ import tempfile
+
+ # Load your updated model and tokenizer from Hugging Face
+ model_name = "SamanthaStorm/abuse-pattern-detector-v2"
+ model = AutoModelForSequenceClassification.from_pretrained(model_name, force_download=True)
+ tokenizer = AutoTokenizer.from_pretrained(model_name, force_download=True)
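+ # Note: force_download=True re-downloads the weights on every app start.
+ # Handy while the model is still being revised; safe to drop once it stabilizes.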
+
+ # Our model outputs 17 labels:
+ # - First 14 are abuse pattern categories
+ # - Last 3 are Danger Assessment cues
+ TOTAL_LABELS = 17
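+ # TOTAL_LABELS documents the expected head size (14 + 3); it is not used
+ # below, but could back a sanity check such as
+ # assert model.config.num_labels == TOTAL_LABELS.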
+
+ def analyze_messages(text):
+     input_text = text.strip()
+     if not input_text:
+         return "Please enter a message for analysis.", None
+
+     # Tokenize input text
+     inputs = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)
+     with torch.no_grad():
+         outputs = model(**inputs)
+
+     # Model logits have shape [1, 17]; squeeze to [17] for the single example
+     logits = outputs.logits.squeeze()  # shape: [17]
+     scores = torch.sigmoid(logits).numpy()
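+     # Sigmoid (not softmax) because this is multi-label classification:
+     # each of the 17 labels is scored independently in [0, 1].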
+
+     # For the first 14 labels (abuse patterns), count how many exceed threshold 0.5
+     abuse_pattern_scores = scores[:14]
+     concerning_pattern_count = int(np.sum(abuse_pattern_scores > 0.5))
+
+     # For the last 3 labels (Danger Assessment cues), count how many exceed threshold 0.5
+     danger_scores = scores[14:17]
+     danger_flag_count = int(np.sum(danger_scores > 0.5))
+
+     # Map danger flag count to Danger Assessment Score
+     if danger_flag_count >= 2:
+         danger_assessment = "High"
+     elif danger_flag_count == 1:
+         danger_assessment = "Moderate"
+     else:
+         danger_assessment = "Low"
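+     # The 0.5 threshold and the >= 2 cue cutoff are heuristic defaults;
+     # they can be tuned against labeled validation data.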
+
+     # Customize resource links based on Danger Assessment Score (with additional niche support)
+     if danger_assessment == "High":
+         resources = (
+             "**Immediate Help:** If you are in immediate danger, please call 911.\n\n"
+             "**Crisis Support:** National DV Hotline – Safety Planning: [thehotline.org/plan-for-safety](https://www.thehotline.org/plan-for-safety/)\n"
+             "**Legal Assistance:** WomensLaw – Legal Help for Survivors: [womenslaw.org](https://www.womenslaw.org/)\n"
+             "**Specialized Support:** For LGBTQ+, immigrants, and neurodivergent survivors, please consult local specialized services or visit RAINN: [rainn.org](https://www.rainn.org/)"
+         )
+     elif danger_assessment == "Moderate":
+         resources = (
+             "**Safety Planning:** The Hotline – What Is Emotional Abuse?: [thehotline.org/resources](https://www.thehotline.org/resources/what-is-emotional-abuse/)\n"
+             "**Relationship Health:** One Love Foundation – Digital Relationship Health: [joinonelove.org](https://www.joinonelove.org/)\n"
+             "**Support Chat:** National Domestic Violence Hotline Chat: [thehotline.org](https://www.thehotline.org/)\n"
+             "**Specialized Groups:** Look for support groups tailored for LGBTQ+, immigrant, and neurodivergent communities."
+         )
+     else:  # Low risk
+         resources = (
+             "**Educational Resources:** Love Is Respect – Healthy Relationships: [loveisrespect.org](https://www.loveisrespect.org/)\n"
+             "**Therapy Finder:** Psychology Today – Find a Therapist: [psychologytoday.com](https://www.psychologytoday.com/us/therapists)\n"
+             "**Relationship Tools:** Relate – Relationship Health Tools: [relate.org.uk](https://www.relate.org.uk/)\n"
+             "**Community Support:** Consider community-based and online support groups, especially those focused on LGBTQ+, immigrant, and neurodivergent survivors."
+         )
+
+     # Prepare the output result with both scores
+     result_md = (
+         f"**Abuse Pattern Count:** {concerning_pattern_count}\n\n"
+         f"**Danger Assessment Score:** {danger_assessment}\n\n"
+         f"**Support Resources:**\n{resources}"
+     )
+
+     # Save the result to a temporary text file for download
+     with tempfile.NamedTemporaryFile(delete=False, suffix=".txt", mode="w") as f:
+         f.write(result_md)
+         report_path = f.name
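+     # delete=False keeps the report on disk after the handler returns,
+     # so Gradio's File component can serve it for download.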
+
+     return result_md, report_path
+
+ # Build the Gradio interface
+ with gr.Blocks() as demo:
+     gr.Markdown("# Abuse Pattern Detector - Risk Analysis")
+     gr.Markdown("Enter one or more messages (separated by newlines) for analysis.")
+
+     text_input = gr.Textbox(label="Input Messages", lines=10, placeholder="Type your message(s) here...")
+     result_output = gr.Markdown(label="Analysis Result")
+     download_output = gr.File(label="Download Report (.txt)")
+
+     text_input.submit(analyze_messages, inputs=text_input, outputs=[result_output, download_output])
+     analyze_btn = gr.Button("Analyze")
+     analyze_btn.click(analyze_messages, inputs=text_input, outputs=[result_output, download_output])
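+     # The textbox's submit event and the Analyze button are wired to the
+     # same handler, so either route returns the report and download link.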
+
+ if __name__ == "__main__":
+     demo.launch()
+