SamanthaStorm committed on
Commit
1bd370c
·
verified ·
1 Parent(s): a6c4863

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -121,7 +121,10 @@ def analyze_messages(input_text, risk_flags):
121
  threshold_labels = [label for label, score in zip(PATTERN_LABELS, scores[:15]) if score > adjusted_thresholds[label]]
122
  phrase_labels = [label for label, _ in matched_phrases]
123
  pattern_labels_used = list(set(threshold_labels + phrase_labels))
124
-
 
 
 
125
  abuse_level = calculate_abuse_level(scores, adjusted_thresholds, motif_hits=[label for label, _ in matched_phrases])
126
  abuse_description = interpret_abuse_level(abuse_level)
127
  # Escalate risk if user checks a critical context box
@@ -131,9 +134,6 @@ def analyze_messages(input_text, risk_flags):
131
  abuse_type, abuser_profile, advice = determine_abuse_type(pattern_labels_used)
132
 
133
  danger_flag_count = sum(score > adjusted_thresholds[label] for label, score in zip(DANGER_LABELS, scores[15:18]))
134
- contextual_flags = risk_flags if risk_flags else []
135
- if len(contextual_flags) >= 2:
136
- danger_flag_count += 1
137
 
138
  critical_flags = ["They've threatened harm", "They monitor/follow me", "I feel unsafe when alone with them"]
139
  high_risk_context = any(flag in contextual_flags for flag in critical_flags)
 
121
  threshold_labels = [label for label, score in zip(PATTERN_LABELS, scores[:15]) if score > adjusted_thresholds[label]]
122
  phrase_labels = [label for label, _ in matched_phrases]
123
  pattern_labels_used = list(set(threshold_labels + phrase_labels))
124
+
125
+ contextual_flags = risk_flags if risk_flags else []
126
+ if len(contextual_flags) >= 2:
127
+ danger_flag_count += 1
128
  abuse_level = calculate_abuse_level(scores, adjusted_thresholds, motif_hits=[label for label, _ in matched_phrases])
129
  abuse_description = interpret_abuse_level(abuse_level)
130
  # Escalate risk if user checks a critical context box
 
134
  abuse_type, abuser_profile, advice = determine_abuse_type(pattern_labels_used)
135
 
136
  danger_flag_count = sum(score > adjusted_thresholds[label] for label, score in zip(DANGER_LABELS, scores[15:18]))
 
 
 
137
 
138
  critical_flags = ["They've threatened harm", "They monitor/follow me", "I feel unsafe when alone with them"]
139
  high_risk_context = any(flag in contextual_flags for flag in critical_flags)