SamanthaStorm committed
Commit b405143 · verified · 1 Parent(s): 8d81cde

Update app.py

Files changed (1)
  1. app.py +34 -6
app.py CHANGED
@@ -219,6 +219,38 @@ def generate_risk_snippet(abuse_score, top_label, escalation_score, stage):
     base += "🧠 You can review the pattern in context. This tool highlights possible dynamics—not judgments."

     return base
+def compute_abuse_score(matched_scores, sentiment):
+    """
+    Calculates the abuse score from the patterns that passed their thresholds, their weights, and the emotional context.
+    """
+    if not matched_scores:
+        return 0
+
+    # Weighted average of the patterns that passed their thresholds
+    weighted_total = sum(score * weight for _, score, weight in matched_scores)
+    weight_sum = sum(weight for _, _, weight in matched_scores)
+    base_score = (weighted_total / weight_sum) * 100
+
+    # Boost for pattern count
+    pattern_count = len(matched_scores)
+    scale = 1.0 + 0.25 * max(0, pattern_count - 1)  # 1.25x for 2 patterns, 1.5x for 3, and so on
+    scaled_score = base_score * scale
+
+    # Pattern floors
+    FLOORS = {
+        "threat": 70,
+        "control": 40,
+        "gaslighting": 30,
+        "insults": 25
+    }
+    floor = max(FLOORS.get(label, 0) for label, _, _ in matched_scores)
+    adjusted_score = max(scaled_score, floor)
+
+    # Sentiment tweak
+    if sentiment == "undermining" and adjusted_score < 50:
+        adjusted_score += 10
+
+    return min(adjusted_score, 100)
 def analyze_single_message(text, thresholds):
     motif_hits, matched_phrases = detect_motifs(text)
     result = sst_pipeline(text)[0]
@@ -267,12 +299,8 @@ def analyze_single_message(text, thresholds):
         if score > adjusted_thresholds[label]
     ]

-    if matched_scores:
-        weighted_total = sum(score * weight for _, score, weight in matched_scores)
-        weight_sum = sum(weight for _, _, weight in matched_scores)
-        abuse_score_raw = (weighted_total / weight_sum) * 100
-    else:
-        abuse_score_raw = 0
+    abuse_score_raw = compute_abuse_score(matched_scores, sentiment)
+    abuse_score = abuse_score_raw

     # ✅ Always assign stage
     stage = get_risk_stage(threshold_labels, sentiment) if threshold_labels else 1
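
For reference, a minimal sketch of how the new scoring path behaves on made-up inputs. The (label, score, weight) tuples and their values below are hypothetical; the sketch mirrors the committed logic and only illustrates the weighted average, the multi-pattern boost, the per-label floor, and the final cap.

# Quick sketch of the scoring path introduced by compute_abuse_score in this commit.
# The matched_scores tuples are hypothetical example inputs, not values from the app.
matched_scores = [
    ("threat", 0.80, 1.5),   # (label, model score, weight); hypothetical values
    ("insults", 0.60, 1.0),
]
sentiment = "undermining"

# Weighted average: (0.80*1.5 + 0.60*1.0) / (1.5 + 1.0) * 100 = 72.0
weighted_total = sum(score * weight for _, score, weight in matched_scores)
weight_sum = sum(weight for _, _, weight in matched_scores)
base_score = (weighted_total / weight_sum) * 100

# Two patterns matched, so the scale is 1.25: 72.0 * 1.25 = 90.0
scale = 1.0 + 0.25 * max(0, len(matched_scores) - 1)
scaled_score = base_score * scale

# Highest floor among the matched labels is "threat" (70); 90.0 already exceeds it
FLOORS = {"threat": 70, "control": 40, "gaslighting": 30, "insults": 25}
floor = max(FLOORS.get(label, 0) for label, _, _ in matched_scores)
adjusted_score = max(scaled_score, floor)

# The +10 "undermining" bump only applies below 50, so it does not fire here
if sentiment == "undermining" and adjusted_score < 50:
    adjusted_score += 10

print(min(adjusted_score, 100))  # 90.0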