SamanthaStorm committed
Commit 6dcef9d · verified · 1 Parent(s): a877a49

Update app.py

Files changed (1)
  1. app.py +7 -7
app.py CHANGED
@@ -489,18 +489,18 @@ def analyze_single_message(text, thresholds):
     if weapon_flag:
         abuse_score_raw = min(abuse_score_raw + 25, 100)

-    # Final display tweak: swap 'insults' with its refined label
-    if insult_label_display and "insults" in threshold_labels:
-        threshold_labels = [
-            insult_label_display if label == "insults" else label
-            for label in threshold_labels
-        ]

    return abuse_score, threshold_labels, top_patterns, {"label": sentiment}, stage, darvo_score


    # Tag must happen after abuse score is finalized
    tone_tag = get_emotional_tone_tag(emotion_profile, sentiment, threshold_labels, abuse_score)
+    # Final display tweak: swap 'insults' with its refined label
+    if insult_label_display and "insults" in threshold_labels:
+        threshold_labels = [
+            insult_label_display if label == "insults" else label
+            for label in threshold_labels
+        ]

    # ---- Profanity + Anger Override Logic ----
    profane_words = {"fuck", "fucking", "bitch", "shit", "cunt", "ho", "asshole", "dick", "whore", "slut"}
@@ -738,7 +738,7 @@ def analyze_composite(msg1, msg2, msg3, *answers_and_none):
        pats[0][0] if (pats := r[0][2]) else "none"
        for r in results
    ]
-    timeline_image = generate_abuse_score_chart(dates_used, abuse_scores, pattern_labels)
+    timeline_image = generate_abuse_score_chart(dates_used, abuse_scores, top_labels)
    out += "\n\n" + escalation_text
    return out, timeline_image
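The first hunk moves the refined-insult relabeling from just before the function's return to just after the tone_tag call; the relabeling logic itself is unchanged. In isolation it is a simple list rewrite. A minimal standalone sketch, assuming placeholder values for insult_label_display and threshold_labels (neither is taken from app.py's real output):

# Sketch of the 'insults' relabeling with illustrative, assumed values.
insult_label_display = "character attacks"   # hypothetical refined label
threshold_labels = ["insults", "control", "gaslighting"]

if insult_label_display and "insults" in threshold_labels:
    threshold_labels = [
        insult_label_display if label == "insults" else label
        for label in threshold_labels
    ]

print(threshold_labels)  # ['character attacks', 'control', 'gaslighting']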
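The second hunk only swaps which variable is passed to generate_abuse_score_chart (top_labels instead of pattern_labels); the surrounding comprehension, which pulls the top pattern name from each per-message result via an assignment expression, stays the same. A minimal sketch of that extraction with mocked result tuples; the nesting of results is an assumption inferred from this diff, and generate_abuse_score_chart itself is not reproduced:

# Mocked results: r[0][2] is assumed to hold (pattern_name, score) pairs.
# Shapes are inferred from this diff only, not from the rest of app.py.
results = [
    ((72, ["control"], [("control", 0.91), ("gaslighting", 0.55)]), "2024-01-01"),
    ((18, [], []), "2024-01-02"),
]

top_labels = [
    pats[0][0] if (pats := r[0][2]) else "none"  # walrus: bind the pattern list, then test it
    for r in results
]

print(top_labels)  # ['control', 'none']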