SamanthaStorm committed
Commit bc7801b · verified · 1 parent: bb14fb2

Update app.py

Files changed (1)
  1. app.py +2 -16
app.py CHANGED
@@ -257,33 +257,19 @@ def analyze_single_message(text, thresholds):
         reverse=True
     )[:2]
 
-    # Compute weighted average across all patterns (not just top 2)
-    # ✅ Only include passed labels in abuse intensity calculation
-    matched_scores = [
-        (label, score, PATTERN_WEIGHTS.get(label, 1.0))
-        for label, score in zip(LABELS, scores)
-        if score > adjusted_thresholds[label]
-    ]
-
-    if matched_scores:
-        weighted_total = sum(score * weight for _, score, weight in matched_scores)
-        weight_sum = sum(weight for _, _, weight in matched_scores)
-        abuse_score_raw = (weighted_total / weight_sum) * 100
-    else:
-        abuse_score_raw = 0
-
     matched_scores = [
         (label, score, PATTERN_WEIGHTS.get(label, 1.0))
         for label, score in zip(LABELS, scores)
         if score > adjusted_thresholds[label]
     ]
-
     if matched_scores:
         weighted_total = sum(score * weight for _, score, weight in matched_scores)
         weight_sum = sum(weight for _, _, weight in matched_scores)
         abuse_score_raw = (weighted_total / weight_sum) * 100
     else:
         abuse_score_raw = 0
+    print(f"Matched patterns used for abuse scoring: {[(l, round(s, 3)) for l, s, _ in matched_scores]}")
+    print(f"Abuse Score Raw: {round(abuse_score_raw, 1)}")
     if threshold_labels:
         stage = get_risk_stage(threshold_labels, sentiment)
     else:
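For reference, here is a minimal runnable sketch of the scoring logic this commit keeps (the duplicated block was removed and the two debug prints added). The values of LABELS, PATTERN_WEIGHTS, adjusted_thresholds, and scores below are illustrative stand-ins; the real definitions live elsewhere in app.py.

# Hypothetical stand-ins for names defined elsewhere in app.py.
LABELS = ["gaslighting", "blame shifting", "contradictory statements"]
PATTERN_WEIGHTS = {"gaslighting": 1.5, "blame shifting": 1.0}
adjusted_thresholds = {label: 0.25 for label in LABELS}
scores = [0.82, 0.40, 0.10]  # one model probability per label

# Keep only the labels whose score clears their adjusted threshold;
# labels missing from PATTERN_WEIGHTS default to a weight of 1.0.
matched_scores = [
    (label, score, PATTERN_WEIGHTS.get(label, 1.0))
    for label, score in zip(LABELS, scores)
    if score > adjusted_thresholds[label]
]

# Weighted average of the passing scores, scaled to a 0-100 abuse score.
if matched_scores:
    weighted_total = sum(score * weight for _, score, weight in matched_scores)
    weight_sum = sum(weight for _, _, weight in matched_scores)
    abuse_score_raw = (weighted_total / weight_sum) * 100
else:
    abuse_score_raw = 0

# Debug output added by this commit.
print(f"Matched patterns used for abuse scoring: {[(l, round(s, 3)) for l, s, _ in matched_scores]}")
print(f"Abuse Score Raw: {round(abuse_score_raw, 1)}")

With these stand-in values, two of the three labels pass their thresholds and the sketch prints a raw score of 65.2, i.e. (0.82 * 1.5 + 0.40 * 1.0) / (1.5 + 1.0) * 100.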