Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -548,9 +548,25 @@ def analyze_single_message(text, thresholds):
         100 if "threat" in threshold_labels or "control" in threshold_labels else 95
     )
 
-
+    # Tag must happen after abuse score is finalized
     tone_tag = get_emotional_tone_tag(emotion_profile, sentiment, threshold_labels, abuse_score)
 
+    # ---- Profanity + Anger Override Logic ----
+    profane_words = {"fuck", "fucking", "bitch", "shit", "cunt", "ho", "asshole", "dick", "whore", "slut"}
+    tokens = set(text.lower().split())
+    has_profane = any(word in tokens for word in profane_words)
+
+    anger_score = emotion_profile.get("Anger", 0)
+    short_text = len(tokens) <= 10
+    insult_score = next((s for l, s in top_patterns if l == "insults"), 0)
+
+    if has_profane and anger_score > 0.75 and short_text and insult_score > 0.1:
+        top_patterns = sorted(top_patterns, key=lambda x: x[1], reverse=True)
+        if top_patterns[0][0] != "insults":
+            top_patterns.insert(0, ("insults", insult_score))
+
+    # Debug
+    print(f"Emotional Tone Tag: {tone_tag}")
     # Debug
     print(f"Emotional Tone Tag: {tone_tag}")
     print("Emotion Profile:")
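The added block promotes "insults" to the front of top_patterns when a short, profane, high-anger message would otherwise rank another pattern first. Below is a minimal standalone sketch of that override, assuming (as the diff suggests) that emotion_profile is a dict of emotion label to score and top_patterns is a list of (label, score) tuples; the apply_insult_override helper name and the sample values are illustrative, not taken from app.py.

# Sketch of the override logic in isolation; helper name and sample data are hypothetical.
PROFANE_WORDS = {"fuck", "fucking", "bitch", "shit", "cunt", "ho", "asshole",
                 "dick", "whore", "slut"}

def apply_insult_override(text, emotion_profile, top_patterns):
    tokens = set(text.lower().split())
    has_profane = any(word in tokens for word in PROFANE_WORDS)

    anger_score = emotion_profile.get("Anger", 0)
    short_text = len(tokens) <= 10
    insult_score = next((s for l, s in top_patterns if l == "insults"), 0)

    # Only fires for short, profane, high-anger messages where "insults"
    # already registered at least a weak signal.
    if has_profane and anger_score > 0.75 and short_text and insult_score > 0.1:
        top_patterns = sorted(top_patterns, key=lambda x: x[1], reverse=True)
        if top_patterns[0][0] != "insults":
            top_patterns.insert(0, ("insults", insult_score))
    return top_patterns

if __name__ == "__main__":
    patterns = [("control", 0.42), ("insults", 0.18)]
    profile = {"Anger": 0.88, "Disgust": 0.30}
    print(apply_insult_override("you are a fucking asshole", profile, patterns))
    # [('insults', 0.18), ('control', 0.42), ('insults', 0.18)]

Note that, as written in the diff, the insert promotes "insults" to the front without removing its original entry, so the label appears twice in top_patterns after the override.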