SamanthaStorm committed
Commit 68ecdb1 · verified · 1 Parent(s): b390ecc

Update app.py

Files changed (1)
  1. app.py +30 -28
app.py CHANGED
@@ -75,38 +75,40 @@ def analyze_messages(input_text):
     # Count the number of triggered abuse pattern and danger flags based on thresholds
     pattern_count = sum(score > THRESHOLDS[label] for label, score in zip(PATTERN_LABELS, scores[:14]))
     danger_flag_count = sum(score > THRESHOLDS[label] for label, score in zip(DANGER_LABELS, scores[14:17]))
+
     # Build formatted raw score display
-    score_lines = [
-        f"{label:25}: {score:.3f}" for label, score in zip(PATTERN_LABELS + DANGER_LABELS, scores)
-    ]
-    raw_score_output = "\n".join(score_lines)
+    score_lines = [
+        f"{label:25}: {score:.3f}" for label, score in zip(PATTERN_LABELS + DANGER_LABELS, scores)
+    ]
+    raw_score_output = "\n".join(score_lines)
 
-    # Calculate overall abuse level and interpret it
-    abuse_level = calculate_abuse_level(scores, THRESHOLDS)
-    abuse_description = interpret_abuse_level(abuse_level)
+    # Calculate overall abuse level and interpret it
+    abuse_level = calculate_abuse_level(scores, THRESHOLDS)
+    abuse_description = interpret_abuse_level(abuse_level)
 
     # Resource logic based on the number of danger cues
-    if danger_flag_count >= 2:
-        resources = "Immediate assistance recommended. Please seek professional help or contact emergency services."
-    else:
-        resources = "For more information on abuse patterns, consider reaching out to support groups or professional counselors."
-
-    # Get top 2 highest scoring abuse patterns (excluding 'non_abusive')
-    scored_patterns = [(label, score) for label, score in zip(PATTERN_LABELS, scores[:14])]
-    top_patterns = sorted(scored_patterns, key=lambda x: x[1], reverse=True)[:2]
-    top_patterns_str = "\n".join([f"• {label.replace('_', ' ').title()}" for label, _ in top_patterns])
-
-    # Format final result
-    result = (
-        f"Abuse Risk Score: {abuse_level}% – {abuse_description}\n"
-        "This message contains signs of emotionally harmful communication that may indicate abusive patterns.\n\n"
-        f"Most Likely Patterns:\n{top_patterns_str}\n\n"
-        f"⚠️ Critical Danger Flags Detected: {danger_flag_count} of 3\n"
-        "The Danger Assessment is a validated tool that helps identify serious risk in intimate partner violence. "
-        "It flags communication patterns associated with increased risk of severe harm. "
-        "For more info, consider reaching out to support groups or professionals.\n\n"
-        f"Resources: {resources}"
-    )
+    if danger_flag_count >= 2:
+        resources = "Immediate assistance recommended. Please seek professional help or contact emergency services."
+    else:
+        resources = "For more information on abuse patterns, consider reaching out to support groups or professional counselors."
+
+    # Get top 2 highest scoring abuse patterns (excluding 'non_abusive')
+    scored_patterns = [(label, score) for label, score in zip(PATTERN_LABELS, scores[:14])]
+    top_patterns = sorted(scored_patterns, key=lambda x: x[1], reverse=True)[:2]
+    top_patterns_str = "\n".join([f"• {label.replace('_', ' ').title()}" for label, _ in top_patterns])
+
+    # Format final result
+    result = (
+        f"Abuse Risk Score: {abuse_level}% – {abuse_description}\n"
+        "This message contains signs of emotionally harmful communication that may indicate abusive patterns.\n\n"
+        f"Most Likely Patterns:\n{top_patterns_str}\n\n"
+        f"⚠️ Critical Danger Flags Detected: {danger_flag_count} of 3\n"
+        "The Danger Assessment is a validated tool that helps identify serious risk in intimate partner violence. "
+        "It flags communication patterns associated with increased risk of severe harm. "
+        "For more info, consider reaching out to support groups or professionals.\n\n"
+        f"Resources: {resources}"
+    )
+
     # Return both a text summary and a JSON-like dict of scores per label
     return result
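
For context, the hunk references several names defined elsewhere in app.py: PATTERN_LABELS, DANGER_LABELS, THRESHOLDS, calculate_abuse_level, and interpret_abuse_level. None of these definitions appear in the diff, so the following is only a minimal sketch of how they might fit together; the label names, threshold values, and risk bands are illustrative assumptions, not the app's real configuration.

# Hypothetical reconstruction of the pieces the hunk depends on.
# All names and values here are placeholders, not taken from app.py.
PATTERN_LABELS = [f"pattern_{i}" for i in range(13)] + ["non_abusive"]  # 14 labels (assumed)
DANGER_LABELS = ["danger_cue_1", "danger_cue_2", "danger_cue_3"]        # 3 danger cues (assumed)
THRESHOLDS = {label: 0.5 for label in PATTERN_LABELS + DANGER_LABELS}   # placeholder cutoffs

def calculate_abuse_level(scores, thresholds):
    # One plausible reading: average the scores that clear their
    # threshold and express the result as a percentage.
    triggered = [
        score for label, score in zip(PATTERN_LABELS + DANGER_LABELS, scores)
        if score > thresholds[label]
    ]
    return round(sum(triggered) / len(triggered) * 100, 2) if triggered else 0.0

def interpret_abuse_level(level):
    # Map the percentage onto a coarse verbal band (bands are assumed).
    if level >= 75:
        return "High risk"
    if level >= 50:
        return "Moderate risk"
    if level >= 25:
        return "Low risk"
    return "Minimal or no indicators"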
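
Since this is the app.py of a Hugging Face Space, analyze_messages is presumably wired to a UI; the interface definition sits outside this hunk, so the Gradio wiring below is an assumption. Note also that although the closing comment mentions returning a dict of scores per label, the function as shown returns only the text summary (and raw_score_output is built but never used), so a single text output is assumed here.

# Assumed wiring — the real interface definition is not part of this commit.
import gradio as gr

demo = gr.Interface(
    fn=analyze_messages,  # the function updated in this commit
    inputs=gr.Textbox(lines=8, label="Paste message(s) to analyze"),
    outputs="text",       # matches the single string the hunk returns
    title="Abuse Pattern Detection (sketch)",
)

if __name__ == "__main__":
    demo.launch()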