SamanthaStorm committed
Commit 8bb30c0 · verified · 1 Parent(s): 07cd99a

Update app.py

Files changed (1)
  app.py +4 -1
app.py CHANGED
@@ -264,17 +264,20 @@ def analyze_single_message(text, thresholds):
         weight_sum += weight
 
     abuse_score_raw = (weighted_total / weight_sum) * 100
+    stage = get_risk_stage(threshold_labels, sentiment)
     if weapon_flag:
         abuse_score_raw = min(abuse_score_raw + 25, 100)  # boost intensity
     if weapon_flag and stage < 2:
         stage = 2
+    if weapon_flag:
+        print("⚠️ Weapon-related language detected.")
 
     if "threat" in threshold_labels or "control" in threshold_labels or "insults" in threshold_labels:
         abuse_score = min(abuse_score_raw, 100)
     else:
         abuse_score = min(abuse_score_raw, 95)
 
-    stage = get_risk_stage(threshold_labels, sentiment)
+
 
     print("\n--- Debug Info ---")
     print(f"Text: {text}")