SamanthaStorm committed on
Commit 6e056de · verified · 1 Parent(s): c5ab02a

Update app.py

Files changed (1):
  app.py +7 -3
app.py CHANGED
@@ -348,18 +348,22 @@ def get_risk_stage(patterns, sentiment):
     return 1
 
 def generate_risk_snippet(abuse_score, top_label, escalation_score, stage):
-    # Extract aggression score from the top label if it's aggression
+    import re
+
+    # Extract aggression score if aggression is detected
     if isinstance(top_label, str) and "aggression" in top_label.lower():
         try:
-            aggression_score = int(top_label.split("–")[1].replace("%", "").strip()) / 100
+            match = re.search(r"\(?(\d+)\%?\)?", top_label)
+            aggression_score = int(match.group(1)) / 100 if match else 0
         except:
             aggression_score = 0
     else:
         aggression_score = 0
 
+    # Revised risk logic
     if abuse_score >= 85 or escalation_score >= 16:
         risk_level = "high"
-    elif abuse_score >= 60 or escalation_score >= 8 or aggression_score >= 0.75:
+    elif abuse_score >= 60 or escalation_score >= 8 or aggression_score >= 0.25:
         risk_level = "moderate"
     elif stage == 2 and abuse_score >= 40:
         risk_level = "moderate"
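For context, a minimal standalone sketch of the regex-based parsing this commit introduces. The helper name extract_aggression_score and the sample label strings are illustrative assumptions, not code from the repository; in app.py the extraction happens inline inside generate_risk_snippet.

import re

# Sketch of the new extraction path: pull a percentage out of the top label
# and convert it to a 0-1 score. Sample labels below are assumed formats.
def extract_aggression_score(top_label):
    if isinstance(top_label, str) and "aggression" in top_label.lower():
        match = re.search(r"\(?(\d+)%?\)?", top_label)
        return int(match.group(1)) / 100 if match else 0
    return 0

print(extract_aggression_score("aggression – 87%"))  # 0.87
print(extract_aggression_score("Aggression (42%)"))  # 0.42
print(extract_aggression_score("control – 90%"))     # 0 (label is not aggression)

Unlike the old top_label.split("–") parsing, the regex tolerates labels that use parentheses or a different dash, and the moderate-risk cutoff on this score drops from 0.75 to 0.25, so mid-range aggression scores now raise the risk level.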