Update app.py
app.py CHANGED
@@ -430,16 +430,16 @@ def analyze_single_message(text, thresholds):
     emotion_profile = get_emotion_profile(text)
     sentiment_score = emotion_profile.get("anger", 0) + emotion_profile.get("disgust", 0)
 
-    # Get model scores
+    # Get model scores
     inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
     with torch.no_grad():
         outputs = model(**inputs)
     scores = torch.sigmoid(outputs.logits.squeeze(0)).numpy()
 
-    # Sentiment override if neutral
+    # Sentiment override if neutral is high while critical thresholds are passed
     if emotion_profile.get("neutral", 0) > 0.85 and any(
-        scores[
-        for
+        scores[LABELS.index(l)] > thresholds[l]
+        for l in ["control", "threat", "blame shifting"]
     ):
         sentiment = "undermining"
     else:
@@ -458,16 +458,7 @@ def analyze_single_message(text, thresholds):
         label for label, score in zip(LABELS, scores)
         if score > adjusted_thresholds[label]
     ]
-
-    tone_tag = get_emotional_tone_tag(emotion_profile, sentiment, threshold_labels, abuse_score)
-    print(f"Emotional Tone Tag: {tone_tag}")
-    # Post-threshold validation: strip recovery if it occurs with undermining sentiment
-    # Override top label if recovery was removed
-    if "recovery" in threshold_labels and tone_tag == "forced accountability flip":
-        threshold_labels.remove("recovery")
-        top_patterns = [p for p in top_patterns if p[0] != "recovery"]
-        print("⚠️ Removing 'recovery' due to undermining sentiment (not genuine repair)")
-        threshold_labels.remove("recovery")
+
     motifs = [phrase for _, phrase in matched_phrases]
 
     darvo_score = calculate_darvo_score(
@@ -479,7 +470,7 @@ def analyze_single_message(text, thresholds):
     )
 
     top_patterns = sorted(
-        [(label, score) for label, score in zip(LABELS, scores)
+        [(label, score) for label, score in zip(LABELS, scores)],
         key=lambda x: x[1],
         reverse=True
     )[:2]
@@ -493,17 +484,23 @@ def analyze_single_message(text, thresholds):
     abuse_score_raw = compute_abuse_score(matched_scores, sentiment)
     abuse_score = abuse_score_raw
 
+    # Risk stage logic
     stage = get_risk_stage(threshold_labels, sentiment) if threshold_labels else 1
     if weapon_flag and stage < 2:
         stage = 2
-
     if weapon_flag:
         abuse_score_raw = min(abuse_score_raw + 25, 100)
 
-    abuse_score = min(
+    abuse_score = min(
+        abuse_score_raw,
+        100 if "threat" in threshold_labels or "control" in threshold_labels else 95
+    )
 
+    # Tone tag must happen after abuse_score is finalized
+    tone_tag = get_emotional_tone_tag(emotion_profile, sentiment, threshold_labels, abuse_score)
 
-    # Debug
+    # Debug
+    print(f"Emotional Tone Tag: {tone_tag}")
     print("Emotion Profile:")
     for emotion, score in emotion_profile.items():
         print(f"  {emotion.capitalize():10}: {score}")
|