SamanthaStorm committed on
Commit
831d2ad
·
verified ·
1 Parent(s): 68a049e

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -755
app.py DELETED
@@ -1,755 +0,0 @@
1
- import gradio as gr
2
- import torch
3
- import numpy as np
4
- from transformers import pipeline, RobertaForSequenceClassification, RobertaTokenizer
5
- from motif_tagging import detect_motifs
6
- import re
7
- import matplotlib.pyplot as plt
8
- import io
9
- from PIL import Image
10
- from datetime import datetime
11
- from transformers import pipeline as hf_pipeline # prevent name collision with gradio pipeline
12
-
13
def get_emotion_profile(text):
    """Run the emotion classifier on *text* and return {emotion: score}.

    The HF pipeline may return a nested list ([[{...}]]) for a single
    string; one level is unwrapped in that case.  Labels are lower-cased
    and scores rounded to three decimal places.
    """
    raw = emotion_pipeline(text)
    if isinstance(raw, list) and isinstance(raw[0], list):
        raw = raw[0]
    profile = {}
    for entry in raw:
        profile[entry['label'].lower()] = round(entry['score'], 3)
    return profile
18
# Emotion model (no retraining needed)
# top_k=None returns a score for every emotion label rather than only the
# best one; truncation guards against over-length inputs.
# NOTE(review): downstream code reads anger/disgust/fear/joy/neutral/sadness
# from this model's output — confirm against the model card.
emotion_pipeline = hf_pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",
    top_k=None,
    truncation=True
)
25
-
26
# --- Timeline Visualization Function ---
def generate_abuse_score_chart(dates, scores, labels):
    """Render the abuse-score timeline and return it as a PIL image.

    dates  : list of strings; if every entry looks like YYYY-MM-DD the x-axis
             uses real dates, otherwise messages are numbered 1..N.
    scores : abuse scores (0-100), one per message.
    labels : currently unused; kept for interface compatibility with callers.
    """
    import matplotlib.pyplot as plt
    import io
    from PIL import Image
    from datetime import datetime
    import re

    # Use a date axis only when all entries parse as YYYY-MM-DD.
    if all(re.match(r"\d{4}-\d{2}-\d{2}", d) for d in dates):
        parsed_x = [datetime.strptime(d, "%Y-%m-%d") for d in dates]
        x_labels = [d.strftime("%Y-%m-%d") for d in parsed_x]
    else:
        parsed_x = list(range(1, len(dates) + 1))
        x_labels = [f"Message {i+1}" for i in range(len(dates))]

    fig, ax = plt.subplots(figsize=(8, 3))
    ax.plot(parsed_x, scores, marker='o', linestyle='-', color='darkred', linewidth=2)

    # Annotate each point with its percentage value.
    for x, y in zip(parsed_x, scores):
        ax.text(x, y + 2, f"{int(y)}%", ha='center', fontsize=8, color='black')

    ax.set_xticks(parsed_x)
    ax.set_xticklabels(x_labels)
    ax.set_xlabel("")  # No axis label
    ax.set_ylabel("Abuse Score (%)")
    ax.set_ylim(0, 105)  # headroom so the text annotations are not clipped
    ax.grid(True)
    plt.tight_layout()

    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    # Fix: close the figure — without this every call leaks a live matplotlib
    # figure, growing memory for the lifetime of the app process.
    plt.close(fig)
    buf.seek(0)
    return Image.open(buf)
60
-
61
-
62
# --- Abuse Model ---
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Multi-label abuse-pattern classifier; produces one sigmoid score per entry
# of LABELS (see analyze_single_message).
model_name = "SamanthaStorm/tether-multilabel-v3"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
# use_fast=False loads the slow (Python) tokenizer — presumably for parity
# with training-time tokenization; confirm before switching to the fast one.
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
68
-
69
# Output labels of the multi-label classifier, in the model's output order.
LABELS = [
    "recovery phase", "obscure language", "control", "gaslighting", "dismissiveness", "blame shifting", "nonabusive", "deflection", "projection", "insults", "contradictory statements",
]

# Per-label decision thresholds applied to the sigmoid scores.
# NOTE(review): "guilt tripping" has a threshold but is not in LABELS, so its
# entry is currently inert; kept in case the model gains that label.
THRESHOLDS = {
    "recovery phase": 0.25,
    "control": 0.18,
    "gaslighting": 0.50,
    "guilt tripping": 0.38,
    "dismissiveness": 0.15,
    "blame shifting": 0.28,
    "nonabusive": 0.100,
    "projection": 0.35,
    "insults": 0.23,
    "contradictory statements": 0.23,
    "obscure language": 0.15,
    # Fix: "deflection" is in LABELS but previously had no threshold, so
    # analyze_single_message raised KeyError on adjusted_thresholds["deflection"].
    # 0.30 chosen as a mid-range default — tune against validation data.
    "deflection": 0.30,
}
86
-
87
# Per-pattern multipliers used by compute_abuse_score: >1 amplifies severe
# patterns, <1 dampens noisy/low-signal ones.
PATTERN_WEIGHTS = {
    "gaslighting": 1.5,
    "control": 1.2,
    "dismissiveness": 0.7,
    "blame shifting": 0.5,
    "insults": 1.4,
    "projection": 1.2,
    "recovery phase": 1.1,
    "nonabusive": 0.1,
    "deflection": 0.4,
    "obscure language": 0.9,
    "guilt tripping": 1.2
}

# Cycle-of-abuse stage descriptions, keyed by the stage number returned
# from get_risk_stage.
RISK_STAGE_LABELS = {
    1: "🌀 Risk Stage: Tension-Building\nThis message reflects rising emotional pressure or subtle control attempts.",
    2: "🔥 Risk Stage: Escalation\nThis message includes direct or aggressive patterns, suggesting active harm.",
    3: "🌧️ Risk Stage: Reconciliation\nThis message reflects a reset attempt—apologies or emotional repair without accountability.",
    4: "🌸 Risk Stage: Calm / Honeymoon\nThis message appears supportive but may follow prior harm, minimizing it."
}

# Safety-checklist questions shown as checkboxes; each tuple is
# (question text, weight added to the escalation score when ticked).
ESCALATION_QUESTIONS = [
    ("Partner has access to firearms or weapons", 4),
    ("Partner threatened to kill you", 3),
    ("Partner threatened you with a weapon", 3),
    ("Partner has ever choked you, even if you considered it consensual at the time", 4),
    ("Partner injured or threatened your pet(s)", 3),
    ("Partner has broken your things, punched or kicked walls, or thrown things ", 2),
    ("Partner forced or coerced you into unwanted sexual acts", 3),
    ("Partner threatened to take away your children", 2),
    ("Violence has increased in frequency or severity", 3),
    ("Partner monitors your calls/GPS/social media", 2)
]

# Pattern labels counted as DARVO indicators by calculate_darvo_score
# (Deny, Attack, Reverse Victim and Offender).
DARVO_PATTERNS = [
    "blame shifting",   # "You're the reason this happens"
    "projection",       # "You're the abusive one"
    "deflection",       # "This isn't about that"
    "dismissiveness",   # "You're overreacting"
    "insults",          # Personal attacks that redirect attention
    "aggression",       # Escalates tone to destabilize
    "recovery phase",   # Sudden affection following aggression
    "contradictory statements"  # “I never said that” immediately followed by a version of what they said
]
129
# Canonical DARVO phrasings, loosely substring-matched against detected motifs
# in calculate_darvo_score.  Grouped roughly as: denial/minimization,
# reversal/attack, and victim-claiming.
DARVO_MOTIFS = [
    "I never said that.", "You’re imagining things.", "That never happened.",
    "You’re making a big deal out of nothing.", "It was just a joke.", "You’re too sensitive.",
    "I don’t know what you’re talking about.", "You’re overreacting.", "I didn’t mean it that way.",
    "You’re twisting my words.", "You’re remembering it wrong.", "You’re always looking for something to complain about.",
    "You’re just trying to start a fight.", "I was only trying to help.", "You’re making things up.",
    "You’re blowing this out of proportion.", "You’re being paranoid.", "You’re too emotional.",
    "You’re always so dramatic.", "You’re just trying to make me look bad.",

    "You’re crazy.", "You’re the one with the problem.", "You’re always so negative.",
    "You’re just trying to control me.", "You’re the abusive one.", "You’re trying to ruin my life.",
    "You’re just jealous.", "You’re the one who needs help.", "You’re always playing the victim.",
    "You’re the one causing all the problems.", "You’re just trying to make me feel guilty.",
    "You’re the one who can’t let go of the past.", "You’re the one who’s always angry.",
    "You’re the one who’s always complaining.", "You’re the one who’s always starting arguments.",
    "You’re the one who’s always making things worse.", "You’re the one who’s always making me feel bad.",
    "You’re the one who’s always making me look like the bad guy.",
    "You’re the one who’s always making me feel like a failure.",
    "You’re the one who’s always making me feel like I’m not good enough.",

    "I can’t believe you’re doing this to me.", "You’re hurting me.",
    "You’re making me feel like a terrible person.", "You’re always blaming me for everything.",
    "You’re the one who’s abusive.", "You’re the one who’s controlling.", "You’re the one who’s manipulative.",
    "You’re the one who’s toxic.", "You’re the one who’s gaslighting me.",
    "You’re the one who’s always putting me down.", "You’re the one who’s always making me feel bad.",
    "You’re the one who’s always making me feel like I’m not good enough.",
    "You’re the one who’s always making me feel like I’m the problem.",
    "You’re the one who’s always making me feel like I’m the bad guy.",
    "You’re the one who’s always making me feel like I’m the villain.",
    "You’re the one who’s always making me feel like I’m the one who needs to change.",
    "You’re the one who’s always making me feel like I’m the one who’s wrong.",
    "You’re the one who’s always making me feel like I’m the one who’s crazy.",
    "You’re the one who’s always making me feel like I’m the one who’s abusive.",
    "You’re the one who’s always making me feel like I’m the one who’s toxic."
]
164
def get_emotional_tone_tag(emotions, sentiment, patterns, abuse_score):
    """Classify the emotional tone of a message from its emotion profile.

    emotions    : dict of emotion name -> score (see get_emotion_profile).
    sentiment   : "undermining" or "supportive".
    patterns    : abuse-pattern labels that passed their thresholds.
    abuse_score : composite abuse score, 0-100 (only rule 1 reads it).

    Rules are evaluated in priority order; the first match wins.
    Returns a tone-tag string, or None when nothing matches.
    """
    sadness = emotions.get("sadness", 0)
    joy = emotions.get("joy", 0)
    neutral = emotions.get("neutral", 0)
    disgust = emotions.get("disgust", 0)  # fix: was assigned twice in the original
    anger = emotions.get("anger", 0)
    fear = emotions.get("fear", 0)

    # 1. Performative Regret
    if (
        sadness > 0.4 and
        any(p in patterns for p in ["blame shifting", "guilt tripping", "recovery phase"]) and
        (sentiment == "undermining" or abuse_score > 40)
    ):
        return "performative regret"

    # 2. Coercive Warmth
    if (
        (joy > 0.3 or sadness > 0.4) and
        any(p in patterns for p in ["control", "gaslighting"]) and
        sentiment == "undermining"
    ):
        return "coercive warmth"

    # 3. Cold Invalidation
    if (
        (neutral + disgust) > 0.5 and
        any(p in patterns for p in ["dismissiveness", "projection", "obscure language"]) and
        sentiment == "undermining"
    ):
        return "cold invalidation"

    # 4. Genuine Vulnerability (only when every pattern is "recovery phase")
    if (
        (sadness + fear) > 0.5 and
        sentiment == "supportive" and
        all(p in ["recovery phase"] for p in patterns)
    ):
        return "genuine vulnerability"

    # 5. Emotional Threat
    if (
        (anger + disgust) > 0.5 and
        any(p in patterns for p in ["control", "threat", "insults", "dismissiveness"]) and
        sentiment == "undermining"
    ):
        return "emotional threat"

    # 6. Weaponized Sadness
    if (
        sadness > 0.6 and
        any(p in patterns for p in ["guilt tripping", "projection"]) and
        sentiment == "undermining"
    ):
        return "weaponized sadness"

    # 7. Toxic Resignation
    if (
        neutral > 0.5 and
        any(p in patterns for p in ["dismissiveness", "obscure language"]) and
        sentiment == "undermining"
    ):
        return "toxic resignation"

    # 8. Aggressive Dismissal
    if (
        anger > 0.5 and
        any(p in patterns for p in ["aggression", "insults", "control"]) and
        sentiment == "undermining"
    ):
        return "aggressive dismissal"

    # 9. Deflective Hostility (moderate, not extreme, anger/disgust)
    if (
        (0.2 < anger < 0.7 or 0.2 < disgust < 0.7) and
        any(p in patterns for p in ["deflection", "projection"]) and
        sentiment == "undermining"
    ):
        return "deflective hostility"

    # 10. Mocking Detachment
    if (
        (neutral + joy) > 0.5 and
        any(p in patterns for p in ["mockery", "insults", "projection"]) and
        sentiment == "undermining"
    ):
        return "mocking detachment"

    # 11. Contradictory Gaslight
    if (
        (joy + anger + sadness) > 0.5 and
        any(p in patterns for p in ["gaslighting", "contradictory statements"]) and
        sentiment == "undermining"
    ):
        return "contradictory gaslight"

    # 12. Calculated Neutrality
    if (
        neutral > 0.6 and
        any(p in patterns for p in ["obscure language", "deflection", "dismissiveness"]) and
        sentiment == "undermining"
    ):
        return "calculated neutrality"

    # 13. Forced Accountability Flip
    if (
        (anger + disgust) > 0.5 and
        any(p in patterns for p in ["blame shifting", "manipulation", "projection"]) and
        sentiment == "undermining"
    ):
        return "forced accountability flip"

    # 14. Conditional Affection
    if (
        joy > 0.4 and
        any(p in patterns for p in ["apology baiting", "control", "recovery phase"]) and
        sentiment == "undermining"
    ):
        return "conditional affection"

    # NOTE(review): near-duplicate of rule 13 — only reachable when
    # "deflection" is the sole matching pattern; kept for behavior parity.
    if (
        (anger + disgust) > 0.5 and
        any(p in patterns for p in ["blame shifting", "projection", "deflection"]) and
        sentiment == "undermining"
    ):
        return "forced accountability flip"

    # Emotional Instability Fallback
    if (
        (anger + sadness + disgust) > 0.6 and
        sentiment == "undermining"
    ):
        return "emotional instability"

    return None
293
def detect_contradiction(message):
    """Return True when *message* matches a known contradictory phrasing.

    Each pattern pairs an affectionate/appeasing opener with an undermining
    follow-up within 15 characters (e.g. "I love you ... I hate you").
    Note: the patterns use curly apostrophes (’), matching the motif lists.
    """
    contradiction_patterns = (
        (r"\b(i love you).{0,15}(i hate you|you ruin everything)", re.IGNORECASE),
        (r"\b(i’m sorry).{0,15}(but you|if you hadn’t)", re.IGNORECASE),
        (r"\b(i’m trying).{0,15}(you never|why do you)", re.IGNORECASE),
        (r"\b(do what you want).{0,15}(you’ll regret it|i always give everything)", re.IGNORECASE),
        (r"\b(i don’t care).{0,15}(you never think of me)", re.IGNORECASE),
        (r"\b(i guess i’m just).{0,15}(the bad guy|worthless|never enough)", re.IGNORECASE),
    )
    for pattern, flags in contradiction_patterns:
        if re.search(pattern, message, flags):
            return True
    return False
303
-
304
def calculate_darvo_score(patterns, sentiment_before, sentiment_after, motifs_found, contradiction_flag=False):
    """Blend four DARVO signals into a 0.0-1.0 score (rounded to 3 places).

    Components and weights: DARVO pattern count (0.3), positive sentiment
    shift (0.3), fraction of known DARVO motifs loosely matched (0.25),
    and a binary contradiction flag (0.15).  Capped at 1.0.
    """
    # Count detected patterns that belong to the DARVO set.
    darvo_pattern_hits = sum(1 for p in patterns if p.lower() in DARVO_PATTERNS)

    # Only a shift toward more negative sentiment contributes.
    shift = sentiment_after - sentiment_before
    sentiment_shift_score = shift if shift > 0 else 0.0

    # Loose, case-insensitive substring match in either direction.
    motif_hits = 0
    for motif in motifs_found:
        lowered = motif.lower()
        if any(phrase.lower() in lowered or lowered in phrase.lower()
               for phrase in DARVO_MOTIFS):
            motif_hits += 1
    motif_score = motif_hits / max(len(DARVO_MOTIFS), 1)

    contradiction_score = 1.0 if contradiction_flag else 0.0

    combined = (
        0.3 * darvo_pattern_hits
        + 0.3 * sentiment_shift_score
        + 0.25 * motif_score
        + 0.15 * contradiction_score
    )
    return round(min(combined, 1.0), 3)
329
def detect_weapon_language(text):
    """Return True if *text* contains any weapon/violence keyword (case-insensitive)."""
    weapon_terms = (
        "knife", "knives", "stab", "cut you", "cutting",
        "gun", "shoot", "rifle", "firearm", "pistol",
        "bomb", "blow up", "grenade", "explode",
        "weapon", "armed", "loaded", "kill you", "take you out",
    )
    lowered = text.lower()
    for term in weapon_terms:
        if term in lowered:
            return True
    return False
338
def get_risk_stage(patterns, sentiment):
    """Map detected patterns (+ sentiment) to a cycle-of-abuse stage 1-4.

    2 = escalation (threat/insults), 3 = reconciliation (recovery phase),
    1 = tension-building (control/guilt tripping, and the default),
    4 = calm/honeymoon (supportive tone masking projection/dismissiveness).
    """
    if any(p in ("threat", "insults") for p in patterns):
        return 2
    if "recovery phase" in patterns:
        return 3
    if "control" in patterns or "guilt tripping" in patterns:
        return 1
    if sentiment == "supportive" and ("projection" in patterns or "dismissiveness" in patterns):
        return 4
    return 1
348
-
349
def generate_risk_snippet(abuse_score, top_label, escalation_score, stage):
    """Build the human-readable risk summary for the results panel.

    abuse_score      : composite abuse intensity, 0-100.
    top_label        : "pattern – NN%" string (en-dash separated) or bare label.
    escalation_score : checklist-derived escalation score.
    stage            : risk stage 1-4 (only stage 2 affects the tiering).

    Returns the formatted markdown snippet.

    Fix: the original contained ~30 lines of unreachable duplicated code
    after the first `return base` (a second WHY_FLAGGED/base build); removed.
    """
    import re

    # Extract an aggression percentage if "aggression" appears in the label.
    if isinstance(top_label, str) and "aggression" in top_label.lower():
        try:
            match = re.search(r"\(?(\d+)\%?\)?", top_label)
            aggression_score = int(match.group(1)) / 100 if match else 0
        except Exception:  # malformed label: treat as no aggression signal
            aggression_score = 0
    else:
        aggression_score = 0

    # Risk tiering from abuse score, checklist score, and aggression signal.
    if abuse_score >= 85 or escalation_score >= 16:
        risk_level = "high"
    elif abuse_score >= 60 or escalation_score >= 8 or aggression_score >= 0.25:
        risk_level = "moderate"
    elif stage == 2 and abuse_score >= 40:
        risk_level = "moderate"
    else:
        risk_level = "low"

    # top_label is usually "pattern – score%" (en dash); fall back to raw value.
    if isinstance(top_label, str) and " – " in top_label:
        pattern_label, pattern_score = top_label.split(" – ")
    else:
        pattern_label = str(top_label) if top_label is not None else "Unknown"
        pattern_score = ""

    # Per-pattern explanation shown under "Why this might be flagged".
    WHY_FLAGGED = {
        "control": "This message may reflect efforts to restrict someone’s autonomy, even if it's framed as concern or care.",
        "gaslighting": "This message could be manipulating someone into questioning their perception or feelings.",
        "dismissiveness": "This message may include belittling, invalidating, or ignoring the other person’s experience.",
        "insults": "Direct insults often appear in escalating abusive dynamics and can erode emotional safety.",
        "threat": "This message includes threatening language, which is a strong predictor of harm.",
        "blame shifting": "This message may redirect responsibility to avoid accountability, especially during conflict.",
        "guilt tripping": "This message may induce guilt in order to control or manipulate behavior.",
        "recovery phase": "This message may be part of a tension-reset cycle, appearing kind but avoiding change.",
        "projection": "This message may involve attributing the abuser’s own behaviors to the victim.",
        "contradictory statements": "This message may contain internal contradictions used to confuse, destabilize, or deflect responsibility.",
        "obscure language": "This message may use overly formal, vague, or complex language to obscure meaning or avoid accountability.",
        "default": "This message contains language patterns that may affect safety, clarity, or emotional autonomy."
    }

    explanation = WHY_FLAGGED.get(pattern_label.lower(), WHY_FLAGGED["default"])

    base = f"\n\n🛑 Risk Level: {risk_level.capitalize()}\n"
    base += f"This message shows strong indicators of **{pattern_label}**. "

    if risk_level == "high":
        base += "The language may reflect patterns of emotional control, even when expressed in soft or caring terms.\n"
    elif risk_level == "moderate":
        base += "There are signs of emotional pressure or verbal aggression that may escalate if repeated.\n"
    else:
        base += "The message does not strongly indicate abuse, but it's important to monitor for patterns.\n"

    base += f"\n💡 *Why this might be flagged:*\n{explanation}\n"
    base += f"\nDetected Pattern: **{pattern_label} ({pattern_score})**\n"
    base += "🧠 You can review the pattern in context. This tool highlights possible dynamics—not judgments."
    return base
440
def compute_abuse_score(matched_scores, sentiment):
    """Combine matched (label, score, weight) triples into a 0-100 abuse score.

    Weighted-average base, boosted for multiple co-occurring patterns,
    raised to a per-pattern floor for severe labels, nudged upward for
    undermining sentiment, and capped at 100.  Empty input scores 0.
    """
    if not matched_scores:
        return 0

    # Weighted average of the patterns that passed their thresholds.
    total = 0
    weight_sum = 0
    for _label, score, weight in matched_scores:
        total += score * weight
        weight_sum += weight
    base_score = (total / weight_sum) * 100

    # Co-occurring patterns compound: 1.25x for 2 patterns, 1.5x for 3, etc.
    scale = 1.0 + 0.25 * max(0, len(matched_scores) - 1)
    scaled_score = base_score * scale

    # Severe patterns impose a minimum score regardless of model confidence.
    FLOORS = {
        "threat": 70,
        "control": 40,
        "gaslighting": 30,
        "insults": 25,
        "aggression": 40
    }
    floor = max(FLOORS.get(label, 0) for label, _, _ in matched_scores)
    adjusted_score = max(scaled_score, floor)

    # Undermining tone nudges borderline (sub-50) scores upward.
    if sentiment == "undermining" and adjusted_score < 50:
        adjusted_score += 10

    return min(adjusted_score, 100)
470
-
471
-
472
def analyze_single_message(text, thresholds):
    """Score one message for abuse patterns.

    text       : the raw message string.
    thresholds : per-label decision thresholds (usually THRESHOLDS.copy()).

    Returns a 7-tuple:
      (abuse_score, threshold_labels, top_patterns, {"label": sentiment},
       stage, darvo_score, tone_tag)
    """
    motif_hits, matched_phrases = detect_motifs(text)

    # Emotion profile; anger+disgust acts as a crude "undermining" signal.
    emotion_profile = get_emotion_profile(text)
    sentiment_score = emotion_profile.get("anger", 0) + emotion_profile.get("disgust", 0)

    # Multi-label model scores: one sigmoid per entry of LABELS.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
        scores = torch.sigmoid(outputs.logits.squeeze(0)).numpy()

    # Sentiment override if neutral is high while critical thresholds are passed
    # NOTE(review): "threat" is not in LABELS or THRESHOLDS — when neutral>0.85
    # and the "control" check fails, LABELS.index("threat") raises ValueError.
    # Confirm the intended label set here.
    if emotion_profile.get("neutral", 0) > 0.85 and any(
        scores[LABELS.index(l)] > thresholds[l]
        for l in ["control", "threat", "blame shifting"]
    ):
        sentiment = "undermining"
    else:
        sentiment = "undermining" if sentiment_score > 0.25 else "supportive"

    weapon_flag = detect_weapon_language(text)

    # Supportive tone raises every threshold slightly (fewer false positives).
    adjusted_thresholds = {
        k: v + 0.05 if sentiment == "supportive" else v
        for k, v in thresholds.items()
    }

    contradiction_flag = detect_contradiction(text)

    # Labels whose score clears the (possibly adjusted) threshold.
    threshold_labels = [
        label for label, score in zip(LABELS, scores)
        if score > adjusted_thresholds[label]
    ]
    # Preliminary tone tag (abuse_score not yet known, so 0 is passed).
    tone_tag = get_emotional_tone_tag(emotion_profile, sentiment, threshold_labels, 0)
    motifs = [phrase for _, phrase in matched_phrases]

    darvo_score = calculate_darvo_score(
        threshold_labels,
        sentiment_before=0.0,
        sentiment_after=sentiment_score,
        motifs_found=motifs,
        contradiction_flag=contradiction_flag
    )

    # Two highest-scoring labels, regardless of thresholds.
    top_patterns = sorted(
        [(label, score) for label, score in zip(LABELS, scores)],
        key=lambda x: x[1],
        reverse=True
    )[:2]
    # Post-threshold validation: strip recovery if it occurs with undermining sentiment
    # NOTE(review): the model label is "recovery phase", not "recovery", so this
    # branch can never fire as written — confirm the intended label string.
    if "recovery" in threshold_labels and tone_tag == "forced accountability flip":
        threshold_labels.remove("recovery")
        top_patterns = [p for p in top_patterns if p[0] != "recovery"]
        print("⚠️ Removing 'recovery' due to undermining sentiment (not genuine repair)")

    # (label, score, weight) triples for every label that passed its threshold.
    matched_scores = [
        (label, score, PATTERN_WEIGHTS.get(label, 1.0))
        for label, score in zip(LABELS, scores)
        if score > adjusted_thresholds[label]
    ]

    abuse_score_raw = compute_abuse_score(matched_scores, sentiment)
    abuse_score = abuse_score_raw

    # Risk stage logic: weapon language forces at least stage 2 and bumps score.
    stage = get_risk_stage(threshold_labels, sentiment) if threshold_labels else 1
    if weapon_flag and stage < 2:
        stage = 2
    if weapon_flag:
        abuse_score_raw = min(abuse_score_raw + 25, 100)

    # Cap at 95 unless a threat/control pattern justifies the full 100.
    abuse_score = min(
        abuse_score_raw,
        100 if "threat" in threshold_labels or "control" in threshold_labels else 95
    )

    # Tone tag must happen after abuse_score is finalized
    tone_tag = get_emotional_tone_tag(emotion_profile, sentiment, threshold_labels, abuse_score)

    # Debug
    print(f"Emotional Tone Tag: {tone_tag}")
    print("Emotion Profile:")
    for emotion, score in emotion_profile.items():
        print(f"  {emotion.capitalize():10}: {score}")
    print("\n--- Debug Info ---")
    print(f"Text: {text}")
    print(f"Sentiment (via emotion): {sentiment} (score: {round(sentiment_score, 3)})")
    print("Abuse Pattern Scores:")
    for label, score in zip(LABELS, scores):
        passed = "✅" if score > adjusted_thresholds[label] else "❌"
        print(f"  {label:25} → {score:.3f} {passed}")
    print(f"Matched for score: {[(l, round(s, 3)) for l, s, _ in matched_scores]}")
    print(f"Abuse Score Raw: {round(abuse_score_raw, 1)}")
    print(f"Motifs: {motifs}")
    print(f"Contradiction: {contradiction_flag}")
    print("------------------\n")

    return abuse_score, threshold_labels, top_patterns, {"label": sentiment}, stage, darvo_score, tone_tag
572
-
573
def analyze_composite(msg1, date1, msg2, date2, msg3, date3, *answers_and_none):
    """Gradio entry point: analyze up to three messages plus the safety checklist.

    The positional textbox inputs arrive as (message, date) pairs; the trailing
    *answers_and_none holds one checkbox value per ESCALATION_QUESTIONS entry
    plus a final "None of the above" checkbox.

    Returns (report_text, timeline_image), or a bare error string when no
    message was entered.

    Fix: the original opened with a block that read `results`, `high`,
    `moderate`, `low`, and `counts` before any of them were defined — a
    guaranteed NameError whenever the checklist was filled in.  That block's
    outputs were also recomputed/overwritten below, so it has been removed.
    """
    # The "None of the above" box only counts when no other box is ticked.
    none_selected_checked = answers_and_none[-1]
    responses_checked = any(answers_and_none[:-1])
    none_selected = not responses_checked and none_selected_checked

    if none_selected:
        escalation_score = None
        risk_level = "unknown"
    else:
        escalation_score = sum(w for (_, w), a in zip(ESCALATION_QUESTIONS, answers_and_none[:-1]) if a)

    messages = [msg1, msg2, msg3]
    dates = [date1, date2, date3]
    active = [(m, d) for m, d in zip(messages, dates) if m.strip()]
    if not active:
        return "Please enter at least one message."

    # Run the model on each non-empty message.
    results = [(analyze_single_message(m, THRESHOLDS.copy()), d) for m, d in active]
    abuse_scores = [r[0][0] for r in results]
    top_labels = [r[0][1][0] if r[0][1] else r[0][2][0][0] for r in results]
    top_scores = [r[0][2][0][1] for r in results]
    sentiments = [r[0][3]['label'] for r in results]
    stages = [r[0][4] for r in results]
    darvo_scores = [r[0][5] for r in results]
    tone_tags = [r[0][6] for r in results]
    dates_used = [r[1] or "Undated" for r in results]  # for the timeline x-axis

    # Escalation bump: model-derived red flags add to the checklist score.
    escalation_bump = 0
    for result, _ in results:
        abuse_score, threshold_labels, top_patterns, sentiment, stage, darvo_score, tone_tag = result
        if darvo_score > 0.65:
            escalation_bump += 3
        if tone_tag in ["forced accountability flip", "emotional threat"]:
            escalation_bump += 2
        if abuse_score > 80:
            escalation_bump += 2
        if stage == 2:
            escalation_bump += 3

    # Hybrid risk level from checklist score + model bumps.
    hybrid_score = escalation_score + escalation_bump if escalation_score is not None else 0
    risk_level = (
        "High" if hybrid_score >= 16 else
        "Moderate" if hybrid_score >= 8 else
        "Low"
    )

    # Post-check override: stage-2 or very high abuse never stays "Low".
    if any(score > 70 for score in abuse_scores) or any(stage == 2 for stage in stages):
        if risk_level == "Low":
            risk_level = "Moderate"

    for result, date in results:
        assert len(result) == 7, "Unexpected output from analyze_single_message"

    # --- Composite Abuse Score using compute_abuse_score ---
    composite_abuse_scores = []
    for result, _ in results:
        _, _, top_patterns, sentiment, _, _, _ = result
        matched_scores = [(label, score, PATTERN_WEIGHTS.get(label, 1.0)) for label, score in top_patterns]
        final_score = compute_abuse_score(matched_scores, sentiment["label"])
        composite_abuse_scores.append(final_score)

    composite_abuse = int(round(sum(composite_abuse_scores) / len(composite_abuse_scores)))

    most_common_stage = max(set(stages), key=stages.count)
    stage_text = RISK_STAGE_LABELS[most_common_stage]

    avg_darvo = round(sum(darvo_scores) / len(darvo_scores), 3)
    darvo_blurb = ""
    if avg_darvo > 0.25:
        level = "moderate" if avg_darvo < 0.65 else "high"
        darvo_blurb = f"\n\n🎭 **DARVO Score: {avg_darvo}** → This indicates a **{level} likelihood** of narrative reversal (DARVO), where the speaker may be denying, attacking, or reversing blame."

    out = f"Abuse Intensity: {composite_abuse}%\n"
    out += "📊 This reflects the strength and severity of detected abuse patterns in the message(s).\n\n"

    # Escalation section text (appended at the very end of the report).
    if escalation_score is None:
        escalation_text = "📉 Escalation Potential: Unknown (Checklist not completed)\n"
        escalation_text += "⚠️ *This section was not completed. Escalation potential is unknown.*\n"
        hybrid_score = 0  # ✅ fallback so it's defined for generate_risk_snippet
    else:
        escalation_text = f"🧨 **Escalation Potential: {risk_level} ({escalation_score}/{sum(w for _, w in ESCALATION_QUESTIONS)})**\n"
        escalation_text += "This score comes directly from the safety checklist and functions as a standalone escalation risk score.\n"
        escalation_text += "It indicates how many serious risk factors are present based on your answers to the safety checklist.\n"

    # Derive top_label from the strongest top_patterns across all messages.
    top_label = None
    if results:
        sorted_patterns = sorted(
            [(label, score) for r in results for label, score in r[0][2]],
            key=lambda x: x[1],
            reverse=True
        )
        if sorted_patterns:
            top_label = f"{sorted_patterns[0][0]} – {int(round(sorted_patterns[0][1] * 100))}%"
    if top_label is None:
        top_label = "Unknown – 0%"

    out += generate_risk_snippet(composite_abuse, top_label, hybrid_score if escalation_score is not None else 0, most_common_stage)
    out += f"\n\n{stage_text}"
    out += darvo_blurb
    out += "\n\n🎭 **Emotional Tones Detected:**\n"
    for i, tone in enumerate(tone_tags):
        label = tone if tone else "none"
        out += f"• Message {i+1}: *{label}*\n"
    print(f"DEBUG: avg_darvo = {avg_darvo}")

    pattern_labels = [r[0][2][0][0] for r in results]  # top label for each message
    timeline_image = generate_abuse_score_chart(dates_used, abuse_scores, pattern_labels)
    out += "\n\n" + escalation_text
    return out, timeline_image
731
-
732
# --- Gradio UI wiring ---
# Three (message, optional-date) textbox pairs feed analyze_composite.
message_date_pairs = [
    (
        gr.Textbox(label=f"Message {i+1}"),
        gr.Textbox(label=f"Date {i+1} (optional)", placeholder="YYYY-MM-DD")
    )
    for i in range(3)
]
# Flatten to the positional order analyze_composite expects:
# msg1, date1, msg2, date2, msg3, date3.
textbox_inputs = [item for pair in message_date_pairs for item in pair]
# One checkbox per escalation question, plus an explicit opt-out box.
quiz_boxes = [gr.Checkbox(label=q) for q, _ in ESCALATION_QUESTIONS]
none_box = gr.Checkbox(label="None of the above")

iface = gr.Interface(
    fn=analyze_composite,
    inputs=textbox_inputs + quiz_boxes + [none_box],
    outputs=[
        gr.Textbox(label="Results"),
        gr.Image(label="Abuse Score Timeline", type="pil")
    ],
    title="Abuse Pattern Detector + Escalation Quiz",
    allow_flagging="manual"
)

if __name__ == "__main__":
    iface.launch()