SamanthaStorm committed
Commit 8d81cde · verified · 1 Parent(s): bc7801b

Update app.py

Files changed (1)
  1. app.py +19 -21
app.py CHANGED
@@ -225,6 +225,7 @@ def analyze_single_message(text, thresholds):
     sentiment = "supportive" if result['label'] == "POSITIVE" else "undermining"
     sentiment_score = result['score'] if sentiment == "undermining" else 0.0
     weapon_flag = detect_weapon_language(text)
+
     adjusted_thresholds = {
         k: v + 0.05 if sentiment == "supportive" else v
         for k, v in thresholds.items()
@@ -240,7 +241,7 @@ def analyze_single_message(text, thresholds):
     threshold_labels = [
         label for label, score in zip(LABELS, scores)
         if score > adjusted_thresholds[label]
-    ]
+    ]
 
     motifs = [phrase for _, phrase in matched_phrases]
 
@@ -250,43 +251,38 @@ def analyze_single_message(text, thresholds):
         sentiment_after=sentiment_score,
         motifs_found=motifs,
         contradiction_flag=contradiction_flag
-    )
+    )
+
+    # Top 2 patterns for display (raw scores)
     top_patterns = sorted(
         [(label, score) for label, score in zip(LABELS, scores)],
         key=lambda x: x[1],
         reverse=True
     )[:2]
 
+    # ✅ Final abuse score: only use patterns that passed threshold
     matched_scores = [
-        (label, score, PATTERN_WEIGHTS.get(label, 1.0))
-        for label, score in zip(LABELS, scores)
-        if score > adjusted_thresholds[label]
-    ]
+        (label, score, PATTERN_WEIGHTS.get(label, 1.0))
+        for label, score in zip(LABELS, scores)
+        if score > adjusted_thresholds[label]
+    ]
+
     if matched_scores:
         weighted_total = sum(score * weight for _, score, weight in matched_scores)
         weight_sum = sum(weight for _, _, weight in matched_scores)
         abuse_score_raw = (weighted_total / weight_sum) * 100
     else:
         abuse_score_raw = 0
-    print(f"Matched patterns used for abuse scoring: {[(l, round(s, 3)) for l, s, _ in matched_scores]}")
-    print(f"Abuse Score Raw: {round(abuse_score_raw, 1)}")
-    if threshold_labels:
-        stage = get_risk_stage(threshold_labels, sentiment)
-    else:
-        stage = 1 # default to tension-building if no patterns matched
-    if weapon_flag:
-        abuse_score_raw = min(abuse_score_raw + 25, 100) # boost intensity
+
+    # ✅ Always assign stage
+    stage = get_risk_stage(threshold_labels, sentiment) if threshold_labels else 1
     if weapon_flag and stage < 2:
         stage = 2
-    if weapon_flag:
-        print("⚠️ Weapon-related language detected.")
 
-    if "threat" in threshold_labels or "control" in threshold_labels or "insults" in threshold_labels:
-        abuse_score = min(abuse_score_raw, 100)
-    else:
-        abuse_score = min(abuse_score_raw, 95)
+    if weapon_flag:
+        abuse_score_raw = min(abuse_score_raw + 25, 100)
 
-
+    abuse_score = min(abuse_score_raw, 100 if "threat" in threshold_labels or "control" in threshold_labels else 95)
 
     print("\n--- Debug Info ---")
     print(f"Text: {text}")
@@ -295,6 +291,8 @@ def analyze_single_message(text, thresholds):
     for label, score in zip(LABELS, scores):
         passed = "✅" if score > adjusted_thresholds[label] else "❌"
         print(f" {label:25} → {score:.3f} {passed}")
+    print(f"Matched for score: {[(l, round(s, 3)) for l, s, _ in matched_scores]}")
+    print(f"Abuse Score Raw: {round(abuse_score_raw, 1)}")
     print(f"Motifs: {motifs}")
     print(f"Contradiction: {contradiction_flag}")
     print("------------------\n")