SamanthaStorm committed
Commit a28ef35 · verified · 1 Parent(s): 9519fb4

Update app.py

Files changed (1)
  1. app.py +29 -9
app.py CHANGED
@@ -134,18 +134,21 @@ def analyze_messages(input_text, risk_flags):
     scored_patterns = [
         (label, score) for label, score in zip(PATTERN_LABELS, scores[:15]) if label != "non_abusive"]
 
-    pattern_labels_used = [label for label, score in scored_patterns if score > adjusted_thresholds[label]]
-
-    abuse_level = calculate_abuse_level(scores, adjusted_thresholds)
+    pattern_labels_used = list(set(
+        [label for label, score in scored_patterns if score > adjusted_thresholds[label]] +
+        [label for label, _ in matched_phrases]
+    ))
+
+    abuse_level = calculate_abuse_level(scores, adjusted_thresholds, motif_hits=[label for label, _ in matched_phrases])
     abuse_description = interpret_abuse_level(abuse_level)
-    pattern_labels_used = [label for label, _ in matched_phrases]
+
     abuse_type, abuser_profile, advice = determine_abuse_type(pattern_labels_used)
 
     if danger_flag_count >= 2:
         resources = "Immediate assistance recommended. Please seek professional help or contact emergency services."
     else:
         resources = "For more information on abuse patterns, consider reaching out to support groups or professional counselors."
-
+
     # Override top patterns if a high-risk motif was detected
     override_labels = {"physical_threat", "suicidal_threat", "extreme_control"}
     override_matches = [label for label, _ in matched_phrases if label in override_labels]
@@ -154,12 +157,11 @@ def analyze_messages(input_text, risk_flags):
         top_patterns = [(label, 1.0) for label in override_matches]
     else:
         top_patterns = sorted(scored_patterns, key=lambda x: x[1], reverse=True)[:2]
-        top_patterns = sorted(scored_patterns, key=lambda x: x[1], reverse=True)[:2]
 
     top_pattern_explanations = "\n".join([
         f"• {label.replace('_', ' ').title()}: {EXPLANATIONS.get(label, 'No explanation available.')}"
         for label, _ in top_patterns
-        ])
+    ])
 
     result = f"Abuse Risk Score: {abuse_level}% – {abuse_description}\n\n"
 
@@ -168,8 +170,26 @@ def analyze_messages(input_text, risk_flags):
 
     result += f"⚠️ Critical Danger Flags Detected: {danger_flag_count} of 3\n"
     result += f"Resources: {resources}\n"
-    result += f"🧭 Sentiment: {sentiment_label.title()} (Confidence: {sentiment_score*100:.2f}%)\n"
-
+    result += f"🧠 Sentiment: {sentiment_label.title()} (Confidence: {sentiment_score*100:.2f}%)\n"
+
+    # Always include these if available:
+    if contextual_flags:
+        result += "\n\n⚠️ You indicated the following:\n" + "\n".join([f"• {flag.replace('_', ' ').title()}" for flag in contextual_flags])
+
+    if high_risk_context:
+        result += "\n\n🚨 These responses suggest a high-risk situation. Consider seeking immediate help or safety planning resources."
+
+    if matched_phrases:
+        result += "\n\n🚨 Detected High-Risk Phrases:\n"
+        for label, phrase in matched_phrases:
+            phrase_clean = phrase.replace('"', "'").strip()
+            result += f"• {label.replace('_', ' ').title()}: “{phrase_clean}”\n"
+
+    # Always show abuse type if available
+    if abuse_type:
+        result += f"\n\n🧠 Likely Abuse Type: {abuse_type}"
+        result += f"\n🧠 Abuser Profile: {abuser_profile}"
+        result += f"\n📘 Safety Tip: {advice}"
     # THEN immediately follow with this:
     if contextual_flags:
         result += "\n\n⚠️ You indicated the following:\n" + "\n".join([f"• {flag.replace('_', ' ').title()}" for flag in contextual_flags])