SamanthaStorm committed
Commit e178791 · verified · 1 Parent(s): 99463df

Update app.py

Files changed (1): app.py +4 -4
app.py CHANGED
@@ -40,8 +40,8 @@ THRESHOLDS = {
 }
 
 # Define label groups using slicing (first 14: abuse patterns, last 3: danger cues)
-PATTERN_LABELS = LABELS[:17]
-DANGER_LABELS = LABELS[17:]
+PATTERN_LABELS = LABELS[:14]
+DANGER_LABELS = LABELS[14:]
 
 def calculate_abuse_level(scores, thresholds):
     triggered_scores = [score for label, score in zip(LABELS, scores) if score > thresholds[label]]
@@ -73,8 +73,8 @@ def analyze_messages(input_text):
     scores = torch.sigmoid(outputs.logits.squeeze(0)).numpy()
 
     # Count the number of triggered abuse pattern and danger flags based on thresholds
-    pattern_count = sum(score > THRESHOLDS[label] for label, score in zip(PATTERN_LABELS, scores[:17]))
-    danger_flag_count = sum(score > THRESHOLDS[label] for label, score in zip(DANGER_LABELS, scores[17:]))
+    pattern_count = sum(score > THRESHOLDS[label] for label, score in zip(PATTERN_LABELS, scores[:14]))
+    danger_flag_count = sum(score > THRESHOLDS[label] for label, score in zip(DANGER_LABELS, scores[14:]))
     # Build formatted raw score display
     score_lines = [
         f"{label:25}: {score:.3f}" for label, score in zip(PATTERN_LABELS + DANGER_LABELS, scores)
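Net effect of the change: the 17-entry LABELS list is split so that PATTERN_LABELS holds the first 14 abuse-pattern labels and DANGER_LABELS the last 3 danger cues, and each group is zipped against the matching slice of the sigmoid score vector before thresholding. Below is a minimal runnable sketch of that logic, assuming a 17-label classification head; the label names, the flat 0.5 thresholds, and the random logits are illustrative stand-ins, not the app's real configuration.

import torch

# Hypothetical stand-ins for the app's real LABELS and THRESHOLDS.
LABELS = [f"pattern_{i+1}" for i in range(14)] + ["danger_1", "danger_2", "danger_3"]
THRESHOLDS = {label: 0.5 for label in LABELS}

# Slices now agree with the comment: first 14 are patterns, last 3 are danger cues.
PATTERN_LABELS = LABELS[:14]
DANGER_LABELS = LABELS[14:]

logits = torch.randn(1, 17)                        # stand-in for outputs.logits
scores = torch.sigmoid(logits.squeeze(0)).numpy()  # per-label probabilities in [0, 1]

# Each label group is compared against its own slice of the score vector,
# so the slice bounds must match the 14/3 label split.
pattern_count = sum(score > THRESHOLDS[label]
                    for label, score in zip(PATTERN_LABELS, scores[:14]))
danger_flag_count = sum(score > THRESHOLDS[label]
                        for label, score in zip(DANGER_LABELS, scores[14:]))
print(f"patterns triggered: {pattern_count}, danger flags: {danger_flag_count}")

Worth noting: zip() truncates silently to its shorter argument, and slicing past the end of a list raises no error, so the pre-fix code (DANGER_LABELS = LABELS[17:] on a 17-item list) simply produced an empty group and a danger count that was always 0, with no exception to flag the mistake.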