SamanthaStorm committed
Commit d20c9bb · verified · 1 Parent(s): 01a3a38

Update app.py

Files changed (1):
  1. app.py +7 -6
app.py CHANGED
@@ -37,15 +37,16 @@ THRESHOLDS = {
     "insults": 0.34,
     "obscure_formal": 0.25,
     "recovery_phase": 0.25,
+    "non_abusive": 0.40,
     "suicidal_threat": 0.45,
     "physical_threat": 0.31,
     "extreme_control": 0.36,
-    "non_abusive": 0.40
+
 }
 
 # Define label groups using slicing (first 14: abuse patterns, last 3: danger cues)
-PATTERN_LABELS = LABELS[:14]
-DANGER_LABELS = LABELS[14:17]
+PATTERN_LABELS = LABELS[:15]
+DANGER_LABELS = LABELS[15:18]
 
 def calculate_abuse_level(scores, thresholds):
     triggered_scores = [score for label, score in zip(LABELS, scores) if score > thresholds[label]]
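For context, a minimal sketch of how the revised slicing partitions the label list. The LABELS contents below are placeholders (the real list is defined earlier in app.py and is not part of this diff); only the assumed ordering matters: 15 pattern labels, now including non_abusive, followed by the 3 danger labels.

```python
# Placeholder label list, for illustration only; the real LABELS is defined
# earlier in app.py. Assumed ordering: 15 pattern labels (with non_abusive
# last among them), then the 3 danger labels.
LABELS = [f"pattern_{i}" for i in range(14)] + ["non_abusive"] \
         + ["suicidal_threat", "physical_threat", "extreme_control"]

PATTERN_LABELS = LABELS[:15]    # 15 abuse-pattern labels, incl. non_abusive
DANGER_LABELS = LABELS[15:18]   # the 3 danger cues

assert DANGER_LABELS == ["suicidal_threat", "physical_threat", "extreme_control"]
```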
@@ -106,8 +107,8 @@ def analyze_messages(input_text):
     scores = torch.sigmoid(outputs.logits.squeeze(0)).numpy()
 
     # Count the number of triggered abuse pattern and danger flags based on thresholds
-    pattern_count = sum(score > adjusted_thresholds[label] for label, score in zip(PATTERN_LABELS, scores[:14]))
-    danger_flag_count = sum(score > adjusted_thresholds[label] for label, score in zip(DANGER_LABELS, scores[14:17]))
+    pattern_count = sum(score > adjusted_thresholds[label] for label, score in zip(PATTERN_LABELS, scores[:15]))
+    danger_flag_count = sum(score > adjusted_thresholds[label] for label, score in zip(DANGER_LABELS, scores[15:18]))
 
     # Check if 'non_abusive' label is triggered
     non_abusive_score = scores[LABELS.index('non_abusive')]
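A runnable sketch of the counting logic in this hunk, shrunk to five labels. The threshold values are taken from the diff above, but the scores array is a made-up stand-in for the model's sigmoid output, not a value from app.py:

```python
import numpy as np

# Made-up stand-ins for the real model scores; thresholds copied from the diff.
LABELS = ["insults", "recovery_phase", "non_abusive",
          "suicidal_threat", "physical_threat"]
adjusted_thresholds = {"insults": 0.34, "recovery_phase": 0.25,
                       "non_abusive": 0.40, "suicidal_threat": 0.45,
                       "physical_threat": 0.31}
scores = np.array([0.50, 0.10, 0.20, 0.60, 0.05])  # sigmoid outputs in [0, 1]

PATTERN_LABELS, DANGER_LABELS = LABELS[:3], LABELS[3:5]
pattern_count = sum(score > adjusted_thresholds[label]
                    for label, score in zip(PATTERN_LABELS, scores[:3]))
danger_flag_count = sum(score > adjusted_thresholds[label]
                        for label, score in zip(DANGER_LABELS, scores[3:5]))
print(pattern_count, danger_flag_count)  # 1 1  (insults and suicidal_threat fire)
```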
@@ -132,7 +133,7 @@ def analyze_messages(input_text):
     resources = "For more information on abuse patterns, consider reaching out to support groups or professional counselors."
 
     # Get top 2 highest scoring abuse patterns (excluding 'non_abusive')
-    scored_patterns = [(label, score) for label, score in zip(PATTERN_LABELS, scores[:14])]
+    scored_patterns = [(label, score) for label, score in zip(PATTERN_LABELS, scores[:15])]
     top_patterns = sorted(scored_patterns, key=lambda x: x[1], reverse=True)[:2]
     top_patterns_str = "\n".join([f"• {label.replace('_', ' ').title()}" for label, _ in top_patterns])
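Finally, a small sketch of the top-pattern formatting in this hunk; the (label, score) pairs are invented stand-ins for zip(PATTERN_LABELS, scores[:15]):

```python
# Invented (label, score) pairs, for illustration only.
scored_patterns = [("insults", 0.72), ("recovery_phase", 0.55), ("non_abusive", 0.10)]

top_patterns = sorted(scored_patterns, key=lambda x: x[1], reverse=True)[:2]
top_patterns_str = "\n".join([f"• {label.replace('_', ' ').title()}"
                              for label, _ in top_patterns])
print(top_patterns_str)
# • Insults
# • Recovery Phase
```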