SamanthaStorm committed on
Commit
9deae4d
·
verified ·
1 Parent(s): d31f068

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -33
app.py CHANGED
@@ -394,67 +394,69 @@ def analyze_single_message(text, thresholds):
394
  key=lambda x: x[1],
395
  reverse=True
396
  )[:2]
397
- # Escalation-based top label override
398
  ESCALATION_HIERARCHY = [
399
  "threat", "insults", "control", "blame shifting", "gaslighting",
400
  "guilt tripping", "projection", "dismissiveness", "contradictory statements",
401
  "recovery phase", "obscure language"
402
- ]
403
 
404
  label_scores = {label: score for label, score in zip(LABELS, scores)}
405
  passed = {label: score for label in threshold_labels if label in label_scores}
406
 
407
- # ๐Ÿ›ก๏ธ Prevent obscure language from being chosen unless it crosses a hard threshold
408
- MIN_OBSCURE_SCORE = 0.30
409
  # ๐Ÿ›ก๏ธ Prevent obscure language from being chosen unless it crosses a hard threshold
410
-
411
  if "obscure language" in passed and passed["obscure language"] < MIN_OBSCURE_SCORE:
412
  del passed["obscure language"]
413
 
414
- # 🎯 Calculate matched scores
415
  matched_scores = [
416
  (label, score, PATTERN_WEIGHTS.get(label, 1.0))
417
  for label, score in zip(LABELS, scores)
418
  if score > adjusted_thresholds[label]
419
- ]
420
 
421
- # ๐Ÿ† Determine top pattern
422
- if passed:
423
- top_score = max(passed.values())
424
- close_matches = {
425
- label: score for label, score in passed.items()
426
- if (top_score - score) <= 0.05
427
- }
428
- sorted_close = sorted(
429
- close_matches.items(),
430
- key=lambda x: ESCALATION_HIERARCHY.index(x[0])
431
- )
432
- top_pattern_label, top_pattern_score = sorted_close[0]
433
- else:
434
- if not top_patterns:
435
- top_pattern_label, top_pattern_score = "none", 0.0
436
  else:
437
- top_pattern_label, top_pattern_score = top_patterns[0]
438
- top_score = top_pattern_score # โœ… define this safely
 
 
 
 
 
 
 
439
 
440
- # 🧮 Compute abuse score
441
- abuse_score_raw = compute_abuse_score(matched_scores, sentiment)
442
- abuse_score = abuse_score_raw
443
 
444
- stage = get_risk_stage(threshold_labels, sentiment) if threshold_labels else 1
445
  if weapon_flag and stage < 2:
446
  stage = 2
447
 
448
  if weapon_flag:
449
  abuse_score_raw = min(abuse_score_raw + 25, 100)
450
 
451
- abuse_score = min(abuse_score_raw, 100 if "threat" in threshold_labels or "control" in threshold_labels else 95)
 
 
 
452
 
453
- # Get tone tag
454
  tone_tag = get_emotional_tone_tag(emotion_profile, sentiment, threshold_labels, abuse_score)
455
  print(f"Emotional Tone Tag: {tone_tag}")
456
 
457
- # Debug logs
458
  print("Emotion Profile:")
459
  for emotion, score in emotion_profile.items():
460
  print(f" {emotion.capitalize():10}: {score}")
@@ -463,8 +465,8 @@ stage = get_risk_stage(threshold_labels, sentiment) if threshold_labels else 1
463
  print(f"Sentiment (via emotion): {sentiment} (score: {round(sentiment_score, 3)})")
464
  print("Abuse Pattern Scores:")
465
  for label, score in zip(LABELS, scores):
466
- passed = "✅" if score > adjusted_thresholds[label] else "❌"
467
- print(f" {label:25} → {score:.3f} {passed}")
468
  print(f"Matched for score: {[(l, round(s, 3)) for l, s, _ in matched_scores]}")
469
  print(f"Abuse Score Raw: {round(abuse_score_raw, 1)}")
470
  print(f"Motifs: {motifs}")
 
394
  key=lambda x: x[1],
395
  reverse=True
396
  )[:2]
397
+
398
  ESCALATION_HIERARCHY = [
399
  "threat", "insults", "control", "blame shifting", "gaslighting",
400
  "guilt tripping", "projection", "dismissiveness", "contradictory statements",
401
  "recovery phase", "obscure language"
402
+ ]
403
 
404
  label_scores = {label: score for label, score in zip(LABELS, scores)}
405
  passed = {label: score for label in threshold_labels if label in label_scores}
406
 
 
 
407
  # ๐Ÿ›ก๏ธ Prevent obscure language from being chosen unless it crosses a hard threshold
408
+ MIN_OBSCURE_SCORE = 0.30
409
  if "obscure language" in passed and passed["obscure language"] < MIN_OBSCURE_SCORE:
410
  del passed["obscure language"]
411
 
412
+ # 🎯 Calculate matched scores
413
  matched_scores = [
414
  (label, score, PATTERN_WEIGHTS.get(label, 1.0))
415
  for label, score in zip(LABELS, scores)
416
  if score > adjusted_thresholds[label]
417
+ ]
418
 
419
+ # ๐Ÿ† Determine top pattern
420
+ if passed:
421
+ top_score = max(passed.values())
422
+ close_matches = {
423
+ label: score for label, score in passed.items()
424
+ if (top_score - score) <= 0.05
425
+ }
426
+ sorted_close = sorted(
427
+ close_matches.items(),
428
+ key=lambda x: ESCALATION_HIERARCHY.index(x[0])
429
+ )
430
+ top_pattern_label, top_pattern_score = sorted_close[0]
 
 
 
431
  else:
432
+ if not top_patterns:
433
+ top_pattern_label, top_pattern_score = "none", 0.0
434
+ else:
435
+ top_pattern_label, top_pattern_score = top_patterns[0]
436
+ top_score = top_pattern_score
437
+
438
+ # 🧮 Compute abuse score
439
+ abuse_score_raw = compute_abuse_score(matched_scores, sentiment)
440
+ abuse_score = abuse_score_raw
441
 
442
+ stage = get_risk_stage(threshold_labels, sentiment) if threshold_labels else 1
 
 
443
 
 
444
  if weapon_flag and stage < 2:
445
  stage = 2
446
 
447
  if weapon_flag:
448
  abuse_score_raw = min(abuse_score_raw + 25, 100)
449
 
450
+ abuse_score = min(
451
+ abuse_score_raw,
452
+ 100 if "threat" in threshold_labels or "control" in threshold_labels else 95
453
+ )
454
 
455
+ # 🎭 Get tone tag
456
  tone_tag = get_emotional_tone_tag(emotion_profile, sentiment, threshold_labels, abuse_score)
457
  print(f"Emotional Tone Tag: {tone_tag}")
458
 
459
+ # 🧾 Debug logs
460
  print("Emotion Profile:")
461
  for emotion, score in emotion_profile.items():
462
  print(f" {emotion.capitalize():10}: {score}")
 
465
  print(f"Sentiment (via emotion): {sentiment} (score: {round(sentiment_score, 3)})")
466
  print("Abuse Pattern Scores:")
467
  for label, score in zip(LABELS, scores):
468
+ passed_mark = "✅" if score > adjusted_thresholds[label] else "❌"
469
+ print(f" {label:25} → {score:.3f} {passed_mark}")
470
  print(f"Matched for score: {[(l, round(s, 3)) for l, s, _ in matched_scores]}")
471
  print(f"Abuse Score Raw: {round(abuse_score_raw, 1)}")
472
  print(f"Motifs: {motifs}")