SamanthaStorm committed (verified)
Commit d4713b6 · 1 Parent(s): c7da5ee

Update app.py

Files changed (1):
  1. app.py +28 -12
app.py CHANGED
@@ -406,24 +406,40 @@ def analyze_single_message(text, thresholds):
 
     # 🛡️ Prevent obscure language from being chosen unless it crosses a hard threshold
     MIN_OBSCURE_SCORE = 0.30
+    # 🛡️ Prevent obscure language from being chosen unless it crosses a hard threshold
+
     if "obscure language" in passed and passed["obscure language"] < MIN_OBSCURE_SCORE:
         del passed["obscure language"]
 
-    if passed:
-        top_score = max(passed.values())
-        close_matches = {label: score for label, score in passed.items() if (top_score - score) <= 0.05}
-        sorted_close = sorted(close_matches.items(), key=lambda x: ESCALATION_HIERARCHY.index(x[0]))
-        top_pattern_label, top_pattern_score = sorted_close[0]
+    # 🎯 Calculate matched scores
+    matched_scores = [
+        (label, score, PATTERN_WEIGHTS.get(label, 1.0))
+        for label, score in zip(LABELS, scores)
+        if score > adjusted_thresholds[label]
+    ]
+
+    # 🏆 Determine top pattern
+    if passed:
+        top_score = max(passed.values())
+        close_matches = {
+            label: score for label, score in passed.items()
+            if (top_score - score) <= 0.05
+        }
+        sorted_close = sorted(
+            close_matches.items(),
+            key=lambda x: ESCALATION_HIERARCHY.index(x[0])
+        )
+        top_pattern_label, top_pattern_score = sorted_close[0]
     else:
-        top_pattern_label, top_pattern_score = top_patterns[0]
-    matched_scores = [
-        (label, score, PATTERN_WEIGHTS.get(label, 1.0))
-        for label, score in zip(LABELS, scores)
-        if score > adjusted_thresholds[label]
-    ]
+        if not top_patterns:
+            top_pattern_label, top_pattern_score = "none", 0.0
+        else:
+            top_pattern_label, top_pattern_score = top_patterns[0]
+    top_score = top_pattern_score  # ✅ define this safely
 
-    abuse_score_raw = compute_abuse_score(matched_scores, sentiment)
-    abuse_score = abuse_score_raw
+    # 🧮 Compute abuse score
+    abuse_score_raw = compute_abuse_score(matched_scores, sentiment)
+    abuse_score = abuse_score_raw
 
     stage = get_risk_stage(threshold_labels, sentiment) if threshold_labels else 1
     if weapon_flag and stage < 2:
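
The reordering above guards two latent crashes in the old hunk: the `else` branch indexed `top_patterns[0]` without checking for an empty list, and `top_score` was bound only inside `if passed:`, so any later read of it could raise a NameError. Below is a minimal, self-contained sketch of the new control flow; `LABELS`, `PATTERN_WEIGHTS`, `ESCALATION_HIERARCHY`, and `compute_abuse_score` are invented stand-ins for illustration, not the actual definitions in app.py.

# Stand-in definitions, assumed for this example only (the real ones live in app.py).
LABELS = ["threats", "insults", "obscure language"]
ESCALATION_HIERARCHY = ["threats", "insults", "obscure language"]  # assumed most-severe-first
PATTERN_WEIGHTS = {"threats": 1.5, "insults": 1.2}

def compute_abuse_score(matched_scores, sentiment):
    # Hypothetical scorer: weighted mean of the matched label scores.
    if not matched_scores:
        return 0.0
    total = sum(score * weight for _, score, weight in matched_scores)
    return total / sum(weight for _, _, weight in matched_scores)

def pick_top_pattern(passed, top_patterns):
    # Mirrors the commit's branch: prefer `passed`, break near-ties (within
    # 0.05 of the best score) by hierarchy order, and fall back safely when
    # both `passed` and `top_patterns` are empty.
    if passed:
        top = max(passed.values())
        close = {lbl: s for lbl, s in passed.items() if top - s <= 0.05}
        label = min(close, key=ESCALATION_HIERARCHY.index)
        return label, close[label]
    if not top_patterns:
        return "none", 0.0
    return top_patterns[0]

scores = [0.62, 0.60, 0.10]
adjusted_thresholds = {label: 0.5 for label in LABELS}
passed = {l: s for l, s in zip(LABELS, scores) if s > adjusted_thresholds[l]}

matched_scores = [
    (label, score, PATTERN_WEIGHTS.get(label, 1.0))
    for label, score in zip(LABELS, scores)
    if score > adjusted_thresholds[label]
]
top_pattern_label, top_score = pick_top_pattern(passed, top_patterns=[])
print(top_pattern_label, top_score)  # threats 0.62 (hierarchy wins the 0.02 near-tie)
print(round(compute_abuse_score(matched_scores, sentiment=0.0), 3))  # 0.611

Note the tie-break semantics: every label within 0.05 of the best score competes, and sorting by `ESCALATION_HIERARCHY.index` picks the one listed earliest, so (assuming the hierarchy is ordered most-severe-first) close calls resolve toward the more severe pattern.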