SamanthaStorm committed
Commit dcb0de6 · verified · 1 Parent(s): ecc77cc

Update app.py

Files changed (1)
  1. app.py +19 -7
app.py CHANGED
@@ -22,10 +22,10 @@ LABELS = [
 ]
 
 THRESHOLDS = {
-    "gaslighting": 0.25, "mockery": 0.15, "dismissiveness": 0.30, "control": 0.43, "guilt_tripping": 0.19,
-    "apology_baiting": 0.45, "blame_shifting": 0.23, "projection": 0.50, "contradictory_statements": 0.25,
+    "gaslighting": 0.25, "mockery": 0.15, "dismissiveness": 0.45, "control": 0.43, "guilt_tripping": 0.15,
+    "apology_baiting": 0.2, "blame_shifting": 0.23, "projection": 0.50, "contradictory_statements": 0.25,
     "manipulation": 0.25, "deflection": 0.30, "insults": 0.34, "obscure_formal": 0.25, "recovery_phase": 0.25,
-    "non_abusive": 2.0, "suicidal_threat": 0.45, "physical_threat": 0.02, "extreme_control": 0.36
+    "non_abusive": 2.0, "suicidal_threat": 0.45, "physical_threat": 0.02, "extreme_control": 0.30
 }
 
 PATTERN_LABELS = LABELS[:15]
@@ -63,11 +63,23 @@ def custom_sentiment(text):
     score = probs[0][label_idx].item()
     return {"label": label, "score": score}
 
+PATTERN_WEIGHTS = {
+    "physical_threat": 1.5,
+    "suicidal_threat": 1.4,
+    "extreme_control": 1.5,
+    "gaslighting": 1.3,
+    "control": 1.2,
+    "dismissiveness": 0.8,
+    "non_abusive": 0.0  # shouldn't contribute to abuse score
+}
+
 def calculate_abuse_level(scores, thresholds, motif_hits=None):
-    triggered_scores = [
-        score for label, score in zip(LABELS, scores) if score > thresholds[label]
-    ]
-    base_score = round(np.mean(triggered_scores) * 100, 2) if triggered_scores else 0.0
+    weighted_scores = []
+    for label, score in zip(LABELS, scores):
+        if score > thresholds[label]:
+            weight = PATTERN_WEIGHTS.get(label, 1.0)
+            weighted_scores.append(score * weight)
+    base_score = round(np.mean(weighted_scores) * 100, 2) if weighted_scores else 0.0
 
     motif_hits = motif_hits or []
     if any(label in motif_hits for label in {"physical_threat", "suicidal_threat", "extreme_control"}):
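
For context, a minimal, self-contained sketch of the weighted scoring introduced by this commit. The three-label setup and the example scores below are hypothetical, chosen only to illustrate the behavior; the real app uses the full LABELS, THRESHOLDS, and PATTERN_WEIGHTS from app.py, and this sketch omits the motif-escalation logic that follows in calculate_abuse_level.

import numpy as np

# Hypothetical three-label setup, for illustration only; the real app uses
# the full LABELS / THRESHOLDS / PATTERN_WEIGHTS defined in app.py.
LABELS = ["gaslighting", "control", "non_abusive"]
THRESHOLDS = {"gaslighting": 0.25, "control": 0.43, "non_abusive": 2.0}
PATTERN_WEIGHTS = {"gaslighting": 1.3, "control": 1.2, "non_abusive": 0.0}

def calculate_abuse_level(scores, thresholds, motif_hits=None):
    # Keep only labels whose score clears its per-label threshold,
    # scaling each surviving score by its pattern weight before averaging.
    # (The motif_hits escalation step is omitted in this sketch.)
    weighted_scores = []
    for label, score in zip(LABELS, scores):
        if score > thresholds[label]:
            weight = PATTERN_WEIGHTS.get(label, 1.0)
            weighted_scores.append(score * weight)
    return round(np.mean(weighted_scores) * 100, 2) if weighted_scores else 0.0

# gaslighting (0.6) and control (0.5) both clear their thresholds:
# mean(0.6 * 1.3, 0.5 * 1.2) * 100 = mean(0.78, 0.60) * 100 = 69.0
print(calculate_abuse_level([0.6, 0.5, 0.1], THRESHOLDS))  # 69.0

Relative to the old unweighted mean, high-severity patterns such as physical_threat now pull the base score up, while dismissiveness (weight 0.8) is discounted. Note that with its threshold of 2.0, non_abusive can never trigger on a probability score, so its 0.0 weight acts as a defensive guard rather than a change in behavior.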