Update app.py
app.py CHANGED
@@ -73,7 +73,7 @@ LABELS = [
 ]
 
 THRESHOLDS = {
-    "blame shifting": 0.
+    "blame shifting": 0.30, "contradictory statements": 0.30, "control": 0.04, "dismissiveness": 0.09,
     "gaslighting": 0.11, "guilt tripping": 0.16, "insults": 0.14, "obscure language": 0.55,
     "projection": 0.11, "recovery phase": 0.24, "threat": 0.09
 }
@@ -345,7 +345,15 @@ def analyze_single_message(text, thresholds):
     # Derive sentiment from emotion intensity
     emotion_profile = get_emotion_profile(text)
     sentiment_score = emotion_profile.get("anger", 0) + emotion_profile.get("disgust", 0)
-    sentiment = "undermining" if sentiment_score > 0.25 else "supportive"
+
+    # Override if neutral tone masks abusive content
+    if emotion_profile.get("neutral", 0) > 0.85 and any(
+        scores[label] > THRESHOLDS[label]
+        for label in ["control", "threat", "blame shifting"]
+    ):
+        sentiment = "undermining"
+    else:
+        sentiment = "undermining" if sentiment_score > 0.25 else "supportive"
 
     weapon_flag = detect_weapon_language(text)
 
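For context on how per-label cutoffs like the ones in the THRESHOLDS hunk are usually consumed, here is a minimal sketch, assuming the Space keeps classifier outputs in a label-to-probability dict. The apply_thresholds helper and the example scores are hypothetical and not part of app.py.

def apply_thresholds(scores, thresholds):
    """Keep only the labels whose scores clear their per-label cutoff."""
    return {label: s for label, s in scores.items() if s > thresholds.get(label, 1.0)}

# Hypothetical scores; cutoffs copied from a subset of THRESHOLDS above.
cutoffs = {"control": 0.04, "threat": 0.09, "gaslighting": 0.11}
print(apply_thresholds({"control": 0.07, "threat": 0.03, "gaslighting": 0.40}, cutoffs))
# -> {'control': 0.07, 'gaslighting': 0.4}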
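The second hunk's override can also be exercised on its own. Below is a sketch that mirrors those added lines as a standalone function, assuming scores and emotion_profile are label-to-probability dicts (the code that produces them is not shown in this diff); derive_sentiment and the example inputs are illustrative only.

THRESHOLDS = {
    "blame shifting": 0.30, "contradictory statements": 0.30, "control": 0.04, "dismissiveness": 0.09,
    "gaslighting": 0.11, "guilt tripping": 0.16, "insults": 0.14, "obscure language": 0.55,
    "projection": 0.11, "recovery phase": 0.24, "threat": 0.09
}

def derive_sentiment(scores, emotion_profile):
    """Mirror the commit's logic: 'undermining' on high anger/disgust, or when a
    mostly neutral tone coexists with control, threat, or blame-shifting scores
    above their cutoffs."""
    sentiment_score = emotion_profile.get("anger", 0) + emotion_profile.get("disgust", 0)
    if emotion_profile.get("neutral", 0) > 0.85 and any(
        scores.get(label, 0) > THRESHOLDS[label]
        for label in ["control", "threat", "blame shifting"]
    ):
        return "undermining"
    return "undermining" if sentiment_score > 0.25 else "supportive"

# Calm wording, but a "control" score above its 0.04 cutoff still flips the result.
print(derive_sentiment(
    {"control": 0.31, "threat": 0.02, "blame shifting": 0.05},
    {"neutral": 0.92, "anger": 0.03, "disgust": 0.01},
))  # -> undermining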