Update app.py
app.py CHANGED
@@ -590,6 +590,49 @@ def analyze_composite(msg1, date1, msg2, date2, msg3, date3, *answers_and_none):
 
     # Run model on messages
     results = [(analyze_single_message(m, THRESHOLDS.copy()), d) for m, d in active]
+    # --- Combined Abuse Escalation Scoring ---
+
+    # Extract predicted abuse labels from all messages
+    predicted_labels = [label for r in results for label, _ in r[0][2]]
+
+    # Categorize by severity
+    high = {'control'}
+    moderate = {
+        'gaslighting', 'dismissiveness', 'obscure language',
+        'insults', 'contradictory statements', 'guilt tripping'
+    }
+    low = {'blame shifting', 'projection', 'recovery phase'}
+
+    # Count severity types
+    counts = {'high': 0, 'moderate': 0, 'low': 0}
+    for label in predicted_labels:
+        if label in high:
+            counts['high'] += 1
+        elif label in moderate:
+            counts['moderate'] += 1
+        elif label in low:
+            counts['low'] += 1
+
+    # Derive abuse_risk from combinations
+    if counts['high'] >= 2 and counts['moderate'] >= 2:
+        abuse_risk = 'Critical'
+    elif (counts['high'] >= 2 and counts['moderate'] >= 1) or (counts['moderate'] >= 3) or (counts['high'] >= 1 and counts['moderate'] >= 2):
+        abuse_risk = 'High'
+    elif (counts['moderate'] == 2) or (counts['high'] == 1 and counts['moderate'] == 1) or (counts['moderate'] == 1 and counts['low'] >= 2) or (counts['high'] == 1 and sum(counts.values()) == 1):
+        abuse_risk = 'Moderate'
+    else:
+        abuse_risk = 'Low'
+
+    # Combine abuse_risk and checklist score into final risk_level
+    if escalation_score is not None:
+        if escalation_score >= 8 or abuse_risk == 'Critical':
+            risk_level = 'Critical'
+        elif escalation_score >= 5 or abuse_risk == 'High':
+            risk_level = 'High'
+        elif escalation_score >= 2 or abuse_risk == 'Moderate':
+            risk_level = 'Moderate'
+        else:
+            risk_level = 'Low'
     abuse_scores = [r[0][0] for r in results]
     top_labels = [r[0][1][0] if r[0][1] else r[0][2][0][0] for r in results]
     top_scores = [r[0][2][0][1] for r in results]
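
For illustration, the following is a minimal, self-contained sketch of the new scoring path pulled out of analyze_composite. The combined_risk helper name, the sample label lists, and the escalation_score values shown here are hypothetical; in the app the labels come from each message's model results (r[0][2]) and escalation_score comes from the checklist handled earlier in the function.

# Standalone sketch of the new "Combined Abuse Escalation Scoring" logic.
# Hypothetical helper for demonstration only; not part of app.py.

def combined_risk(predicted_labels, escalation_score=None):
    # Severity buckets, matching the sets added in the hunk
    high = {'control'}
    moderate = {
        'gaslighting', 'dismissiveness', 'obscure language',
        'insults', 'contradictory statements', 'guilt tripping'
    }
    low = {'blame shifting', 'projection', 'recovery phase'}

    # Count how many predicted labels fall into each bucket
    counts = {'high': 0, 'moderate': 0, 'low': 0}
    for label in predicted_labels:
        if label in high:
            counts['high'] += 1
        elif label in moderate:
            counts['moderate'] += 1
        elif label in low:
            counts['low'] += 1

    # Derive abuse_risk from the same combinations as the hunk
    if counts['high'] >= 2 and counts['moderate'] >= 2:
        abuse_risk = 'Critical'
    elif (counts['high'] >= 2 and counts['moderate'] >= 1) or counts['moderate'] >= 3 \
            or (counts['high'] >= 1 and counts['moderate'] >= 2):
        abuse_risk = 'High'
    elif (counts['moderate'] == 2) or (counts['high'] == 1 and counts['moderate'] == 1) \
            or (counts['moderate'] == 1 and counts['low'] >= 2) \
            or (counts['high'] == 1 and sum(counts.values()) == 1):
        abuse_risk = 'Moderate'
    else:
        abuse_risk = 'Low'

    # Fold the checklist escalation score (if present) into the final level
    if escalation_score is None:
        return abuse_risk, None
    if escalation_score >= 8 or abuse_risk == 'Critical':
        risk_level = 'Critical'
    elif escalation_score >= 5 or abuse_risk == 'High':
        risk_level = 'High'
    elif escalation_score >= 2 or abuse_risk == 'Moderate':
        risk_level = 'Moderate'
    else:
        risk_level = 'Low'
    return abuse_risk, risk_level


if __name__ == '__main__':
    # Two 'control' labels plus two moderate labels -> Critical, even with a low checklist score
    print(combined_risk(['control', 'control', 'gaslighting', 'insults'], escalation_score=1))
    # A single moderate label with a checklist score of 0 stays Low
    print(combined_risk(['dismissiveness'], escalation_score=0))

Note that, like the committed hunk, the sketch only produces a combined level when an escalation_score is supplied; it returns None for the combined level otherwise to make that condition explicit.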