SamanthaStorm committed
Commit 42d279c · verified · 1 Parent(s): 92394e7

Update app.py

Files changed (1)
  1. app.py +11 -1
app.py CHANGED

@@ -80,6 +80,9 @@ def calculate_abuse_level(scores, thresholds, motif_hits=None):
         weight = PATTERN_WEIGHTS.get(label, 1.0)
         weighted_scores.append(score * weight)
     base_score = round(np.mean(weighted_scores) * 100, 2) if weighted_scores else 0.0
+    # Escalate risk if user checks a critical context box
+    if context_flags and abuse_level < 15:
+        abuse_level = 15  # bump to at least Mild Concern

     motif_hits = motif_hits or []
     if any(label in motif_hits for label in {"physical_threat", "suicidal_threat", "extreme_control"}):
@@ -102,6 +105,7 @@ def analyze_messages(input_text, risk_flags):
     input_text = input_text.strip()
     if not input_text:
         return "Please enter a message for analysis."
+    normalized_text = input_text.strip().lower()

     motif_flags, matched_phrases = detect_motifs(input_text)
     risk_flags = list(set(risk_flags + motif_flags)) if risk_flags else motif_flags
@@ -119,11 +123,17 @@

     pattern_labels_used = list(set(
         [label for label, score in zip(PATTERN_LABELS, scores[:15]) if score > adjusted_thresholds[label]] +
-        [label for label, _ in matched_phrases]
+        matched_phrases = [
+            label for label in PATTERN_LABELS
+            if label.lower() in normalized_text
+        ]
     ))

     abuse_level = calculate_abuse_level(scores, adjusted_thresholds, motif_hits=[label for label, _ in matched_phrases])
     abuse_description = interpret_abuse_level(abuse_level)
+    # Escalate risk if user checks a critical context box
+    if context_flags and abuse_level < 15:
+        abuse_level = 15  # bump to at least Mild Concern

     abuse_type, abuser_profile, advice = determine_abuse_type(pattern_labels_used)
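
For reference, a minimal standalone sketch of what the added lines appear to do, using toy stand-ins for PATTERN_LABELS and the context checkboxes (both defined elsewhere in app.py): label names are substring-matched against the lowercased input to rebuild matched_phrases, and abuse_level is floored at 15 ("Mild Concern") when a critical context box is checked. In the sketch the matched_phrases assignment is kept outside the list(set(...)) call so the snippet parses on its own.

# Minimal sketch, not the actual app.py: PATTERN_LABELS and the example inputs
# below are hypothetical placeholders for values defined elsewhere in the app.
PATTERN_LABELS = ["gaslighting", "blame shifting", "extreme_control"]  # placeholder labels

def match_phrases(input_text, pattern_labels):
    # Substring-match label names against the lowercased, stripped input text.
    normalized_text = input_text.strip().lower()
    return [label for label in pattern_labels if label.lower() in normalized_text]

def apply_context_floor(abuse_level, context_flags):
    # Escalate to at least 15 ("Mild Concern") when any critical context box is checked.
    if context_flags and abuse_level < 15:
        abuse_level = 15
    return abuse_level

matched_phrases = match_phrases("He keeps gaslighting me about money.", PATTERN_LABELS)
print(matched_phrases)                        # ['gaslighting']
print(apply_context_floor(8.5, ["fear"]))     # 15
print(apply_context_floor(40.0, ["fear"]))    # 40.0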