SamanthaStorm committed
Commit f6abe75 · verified
1 Parent(s): 83af1fa

Update app.py

Files changed (1)
  1. app.py +17 -29
app.py CHANGED
@@ -9,15 +9,12 @@ import io
 from PIL import Image
 from datetime import datetime
 from transformers import pipeline as hf_pipeline  # prevent name collision with gradio pipeline
-def get_emotional_tone_tag(emotion_profile, sentiment, patterns, abuse_score, text):
-    sadness = emotion_profile.get("sadness", 0)
-    joy = emotion_profile.get("joy", 0)
-    neutral = emotion_profile.get("neutral", 0)
-    disgust = emotion_profile.get("disgust", 0)
-    anger = emotion_profile.get("anger", 0)
-    fear = emotion_profile.get("fear", 0)
-
-    # [then all the logic for performative regret → hostile moralizing]
+
+def get_emotion_profile(text):
+    emotions = emotion_pipeline(text)
+    if isinstance(emotions, list) and isinstance(emotions[0], list):
+        emotions = emotions[0]
+    return {e['label'].lower(): round(e['score'], 3) for e in emotions}
 # Emotion model (no retraining needed)
 emotion_pipeline = hf_pipeline(
     "text-classification",
@@ -133,17 +130,15 @@ ESCALATION_QUESTIONS = [
     ("Violence has increased in frequency or severity", 3),
     ("Partner monitors your calls/GPS/social media", 2)
 ]
+def get_emotional_tone_tag(emotions, sentiment, patterns, abuse_score):
+    sadness = emotions.get("sadness", 0)
+    joy = emotions.get("joy", 0)
+    neutral = emotions.get("neutral", 0)
+    disgust = emotions.get("disgust", 0)
+    anger = emotions.get("anger", 0)
+    fear = emotions.get("fear", 0)
+    disgust = emotions.get("disgust", 0)
 
-
-def get_emotional_tone_tag(emotion_profile, sentiment, patterns, abuse_score, text):
-    sadness = emotion_profile.get("sadness", 0)
-    joy = emotion_profile.get("joy", 0)
-    neutral = emotion_profile.get("neutral", 0)
-    disgust = emotion_profile.get("disgust", 0)
-    anger = emotion_profile.get("anger", 0)
-    fear = emotion_profile.get("fear", 0)
-
-    # [then all the logic for performative regret → hostile moralizing]
     # 1. Performative Regret
     if (
         sadness > 0.4 and
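With the text parameter dropped, the tagger now depends only on the emotion profile, the sentiment label, the detected patterns, and the abuse score. A hypothetical call with invented inputs, showing the shape each argument is expected to have:

# Hypothetical inputs; in app.py these come from the classifier stages
# inside analyze_single_message.
emotions = get_emotion_profile("I'm sorry you feel that way.")  # dict of label -> score
tone_tag = get_emotional_tone_tag(
    emotions,             # e.g. {'sadness': 0.51, 'joy': 0.01, ...}
    "undermining",        # sentiment label
    ["dismissiveness"],   # patterns whose scores cleared their thresholds
    72,                   # abuse score (invented value)
)
# Returns a tag such as "performative regret", or None when no rule fires.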
@@ -262,14 +257,6 @@ def get_emotional_tone_tag(emotion_profile, sentiment, patterns, abuse_score, text):
         sentiment == "undermining"
     ):
         return "emotional instability"
-
-    # 15. Hostile Moralizing
-    if (
-        any(p in patterns for p in ["dismissiveness", "blame shifting"]) and
-        sentiment == "undermining" and
-        any(word in text.lower() for word in ["victim", "sympathy", "expect", "started"])
-    ):
-        return "hostile moralizing"
 
     return None
 # 🔄 New DARVO score model (regression-based)
@@ -459,7 +446,7 @@ def analyze_single_message(text, thresholds):
         label for label, score in zip(LABELS, scores)
         if score > adjusted_thresholds[label]
     ]
-    tone_tag = get_emotional_tone_tag(emotion_profile, sentiment, threshold_labels, abuse_score, text)
+    tone_tag = get_emotional_tone_tag(emotion_profile, sentiment, threshold_labels, 0)
 
 
     top_patterns = sorted(
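Note that this first call passes 0 as a stand-in: abuse_score has not been computed yet at this point in analyze_single_message, and the tag is recomputed with the real score in the next hunk, overwriting this value.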
@@ -495,7 +482,7 @@ def analyze_single_message(text, thresholds):
     )
 
     # Tag must happen after abuse score is finalized
-    tone_tag = get_emotional_tone_tag(emotion_profile, sentiment, threshold_labels, abuse_score, text)
+    tone_tag = get_emotional_tone_tag(emotion_profile, sentiment, threshold_labels, abuse_score)
 
     # ---- Profanity + Anger Override Logic ----
     profane_words = {"fuck", "fucking", "bitch", "shit", "cunt", "ho", "asshole", "dick", "whore", "slut"}
@@ -639,6 +626,7 @@ def analyze_composite(msg1, msg2, msg3, *answers_and_none):
     escalation_text += "📋 This score combines your safety checklist answers *and* detected high-risk behavior.\n"
     escalation_text += f"• Pattern Risk: {pattern_escalation_risk}\n"
    escalation_text += f"• Checklist Risk: {checklist_escalation_risk}\n"
+    escalation_text += f"• Escalation Bump: +{escalation_bump} (from DARVO, tone, intensity, etc.)"
 
     # Composite Abuse Score
     composite_abuse_scores = []
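For reference, a self-contained rendering of the appended block with invented risk values (escalation_bump's computation is not shown in this diff; an integer is assumed):

pattern_escalation_risk = "High"        # invented
checklist_escalation_risk = "Moderate"  # invented
escalation_bump = 3                     # assumed integer

escalation_text = "📋 This score combines your safety checklist answers *and* detected high-risk behavior.\n"
escalation_text += f"• Pattern Risk: {pattern_escalation_risk}\n"
escalation_text += f"• Checklist Risk: {checklist_escalation_risk}\n"
escalation_text += f"• Escalation Bump: +{escalation_bump} (from DARVO, tone, intensity, etc.)"
print(escalation_text)
# 📋 This score combines your safety checklist answers *and* detected high-risk behavior.
# • Pattern Risk: High
# • Checklist Risk: Moderate
# • Escalation Bump: +3 (from DARVO, tone, intensity, etc.)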
 