Update app.py
app.py (CHANGED)

@@ -9,7 +9,7 @@ model_name = "SamanthaStorm/abuse-pattern-detector-v2"
 model = RobertaForSequenceClassification.from_pretrained(model_name)
 tokenizer = RobertaTokenizer.from_pretrained(model_name)
 
-# Define labels
+# Define labels (total 17 labels)
 LABELS = [
     "gaslighting", "mockery", "dismissiveness", "control",
     "guilt_tripping", "apology_baiting", "blame_shifting", "projection",
@@ -18,12 +18,12 @@ LABELS = [
     "extreme_control"
 ]
 
-# Custom thresholds per label (
+# Custom thresholds per label (make sure these are exactly as in the original)
 THRESHOLDS = {
     "gaslighting": 0.15,
     "mockery": 0.15,
-    "dismissiveness": 0.25,
-    "control": 0.
+    "dismissiveness": 0.25,  # Keep this as 0.25 (not 0.30)
+    "control": 0.13,
     "guilt_tripping": 0.15,
     "apology_baiting": 0.15,
     "blame_shifting": 0.15,
@@ -31,7 +31,7 @@ THRESHOLDS = {
     "contradictory_statements": 0.15,
     "manipulation": 0.15,
     "deflection": 0.15,
-    "insults": 0.
+    "insults": 0.20,
     "obscure_formal": 0.20,
     "recovery_phase": 0.15,
     "suicidal_threat": 0.08,
@@ -39,7 +39,7 @@ THRESHOLDS = {
     "extreme_control": 0.30,
 }
 
-#
+# Define label groups using slicing (first 14 are abuse patterns, last 3 are danger cues)
 PATTERN_LABELS = LABELS[:14]
 DANGER_LABELS = LABELS[14:]
 
@@ -72,64 +72,34 @@ def analyze_messages(input_text):
     outputs = model(**inputs)
     scores = torch.sigmoid(outputs.logits.squeeze(0)).numpy()
 
-    # Count triggered labels
+    # Count triggered labels using the correct slices
     pattern_count = sum(score > THRESHOLDS[label] for label, score in zip(PATTERN_LABELS, scores[:14]))
     danger_flag_count = sum(score > THRESHOLDS[label] for label, score in zip(DANGER_LABELS, scores[14:]))
 
-    # Abuse level calculation
+    # Abuse level calculation and severity interpretation
     abuse_level = calculate_abuse_level(scores, THRESHOLDS)
     abuse_description = interpret_abuse_level(abuse_level)
 
-    # Resource logic
+    # Resource logic (example logic; adjust as needed)
     if danger_flag_count >= 2:
-        resources = (
-            "**Immediate Help:** Call 911 if in danger.\n\n"
-            "**Crisis Support:** National DV Hotline – [thehotline.org/plan-for-safety](https://www.thehotline.org/plan-for-safety/)\n"
-            "**Legal Support:** WomensLaw – [womenslaw.org](https://www.womenslaw.org/)\n"
-            "**Specialized Services:** RAINN, StrongHearts, LGBTQ+, immigrant, neurodivergent resources"
-        )
-    elif danger_flag_count == 1:
-        resources = (
-            "**Emotional Abuse Info:** [thehotline.org/resources](https://www.thehotline.org/resources/what-is-emotional-abuse/)\n"
-            "**Relationship Education:** [joinonelove.org](https://www.joinonelove.org/)\n"
-            "**Support Chat:** [thehotline.org](https://www.thehotline.org/)\n"
-            "**Community Groups:** LGBTQ+, immigrant, and neurodivergent spaces"
-        )
+        resources = "Immediate assistance recommended. Please seek professional help or contact emergency services."
     else:
-        resources = (
-            ...
-        )
+        resources = "For more information on abuse patterns, consider reaching out to support groups or professional counselors."
 
-    ...
-        f"### Analysis Summary\n\n"
-        f"**Abuse Pattern Count:** {pattern_count}\n"
-        f"**Danger Cues Detected:** {danger_flag_count}\n"
-        f"**Abuse Level:** {abuse_level}% ({abuse_description})\n\n"
-        f"### Suggested Support Resources\n{resources}"
-    )
+    # Output combining counts, severity, and resource suggestion
+    result = (
+        f"Abuse Patterns Detected: {pattern_count} out of {len(PATTERN_LABELS)}\n"
+        f"Danger Flags Detected: {danger_flag_count} out of {len(DANGER_LABELS)}\n"
+        f"Abuse Level: {abuse_level}% - {abuse_description}\n"
+        f"Resources: {resources}"
+    )
+    return result, scores
 
-    ...
-
-# Interface
-with gr.Blocks() as demo:
-    gr.Markdown("# Abuse Pattern Detector")
-    gr.Markdown("Paste one or more messages for analysis (multi-line supported).")
-
-    text_input = gr.Textbox(label="Text Message(s)", lines=10, placeholder="Paste messages here...")
-    result_output = gr.Markdown()
-    file_output = gr.File(label="Download Analysis (.txt)")
-
-    text_input.submit(analyze_messages, inputs=text_input, outputs=[result_output, file_output])
-    analyze_btn = gr.Button("Analyze")
-    analyze_btn.click(analyze_messages, inputs=text_input, outputs=[result_output, file_output])
-
-demo.launch()
+iface = gr.Interface(
+    fn=analyze_messages,
+    inputs=gr.inputs.Textbox(lines=10, placeholder="Enter message here..."),
+    outputs=["text", "json"],
+    title="Abuse Pattern Detector"
+)
+
+iface.launch()
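
A note on the two helpers the diff calls but does not show: calculate_abuse_level(scores, THRESHOLDS) and interpret_abuse_level(abuse_level) are defined elsewhere in app.py and are untouched by this commit. Purely as an illustration of helpers with matching signatures (the function names come from the diff; the bodies below are assumptions, not the Space's actual code, and they rely on the LABELS list defined earlier in app.py):

import numpy as np

# Hypothetical sketch only; the real implementations live elsewhere in app.py.
def calculate_abuse_level(scores, thresholds):
    # Average the scores that exceed their per-label thresholds, scaled to a 0-100 percentage.
    triggered = [score for label, score in zip(LABELS, scores) if score > thresholds[label]]
    if not triggered:
        return 0.0
    return round(float(np.mean(triggered)) * 100, 2)

def interpret_abuse_level(abuse_level):
    # Map the percentage to a coarse severity description used in the output string.
    if abuse_level >= 70:
        return "Severe"
    if abuse_level >= 40:
        return "Moderate"
    if abuse_level > 0:
        return "Mild"
    return "No significant concern"

Under this sketch, an abuse_level of 55 would be described as "Moderate", which is the kind of value interpolated into the new f"Abuse Level: {abuse_level}% - {abuse_description}" line.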
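One compatibility note on the rewritten interface: gr.inputs.Textbox comes from Gradio's legacy namespace, which was deprecated in Gradio 3.x and removed in 4.x (the string shorthands "text" and "json" still work). If the Space pins a recent Gradio release, the input component can be passed directly; a minimal sketch under that assumption (the component labels are illustrative, not from the original code):

import gradio as gr

# Sketch for Gradio 3.x/4.x: component classes replace the old gr.inputs / gr.outputs namespaces.
iface = gr.Interface(
    fn=analyze_messages,
    inputs=gr.Textbox(lines=10, placeholder="Enter message here..."),
    outputs=[gr.Textbox(label="Analysis"), gr.JSON(label="Raw scores")],
    title="Abuse Pattern Detector",
)

iface.launch()

Since analyze_messages now returns the raw scores as a NumPy array, converting them with scores.tolist() before returning may avoid serialization issues in the JSON output.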