SamanthaStorm committed on
Commit
ee32b37
·
verified ·
1 Parent(s): 41daae0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -2
app.py CHANGED
@@ -155,7 +155,15 @@ def calculate_darvo_score(patterns, sentiment_before, sentiment_after, motifs_fo
155
  0.25 * motif_score +
156
  0.15 * contradiction_score, 1.0
157
  ), 3)
158
-
 
 
 
 
 
 
 
 
159
  def get_risk_stage(patterns, sentiment):
160
  if "threat" in patterns or "insults" in patterns:
161
  return 2
@@ -214,7 +222,7 @@ def analyze_single_message(text, thresholds):
214
  result = sst_pipeline(text)[0]
215
  sentiment = "supportive" if result['label'] == "POSITIVE" else "undermining"
216
  sentiment_score = result['score'] if sentiment == "undermining" else 0.0
217
-
218
  adjusted_thresholds = {
219
  k: v + 0.05 if sentiment == "supportive" else v
220
  for k, v in thresholds.items()
@@ -256,6 +264,10 @@ def analyze_single_message(text, thresholds):
256
  weight_sum += weight
257
 
258
  abuse_score_raw = (weighted_total / weight_sum) * 100
 
 
 
 
259
 
260
  if "threat" in threshold_labels or "control" in threshold_labels or "insults" in threshold_labels:
261
  abuse_score = min(abuse_score_raw, 100)
 
155
  0.25 * motif_score +
156
  0.15 * contradiction_score, 1.0
157
  ), 3)
158
+ def detect_weapon_language(text):
159
+ weapon_keywords = [
160
+ "knife", "knives", "stab", "cut you", "cutting",
161
+ "gun", "shoot", "rifle", "firearm", "pistol",
162
+ "bomb", "blow up", "grenade", "explode",
163
+ "weapon", "armed", "loaded", "kill you", "take you out"
164
+ ]
165
+ text_lower = text.lower()
166
+ return any(word in text_lower for word in weapon_keywords)
167
  def get_risk_stage(patterns, sentiment):
168
  if "threat" in patterns or "insults" in patterns:
169
  return 2
 
222
  result = sst_pipeline(text)[0]
223
  sentiment = "supportive" if result['label'] == "POSITIVE" else "undermining"
224
  sentiment_score = result['score'] if sentiment == "undermining" else 0.0
225
+ weapon_flag = detect_weapon_language(text)
226
  adjusted_thresholds = {
227
  k: v + 0.05 if sentiment == "supportive" else v
228
  for k, v in thresholds.items()
 
264
  weight_sum += weight
265
 
266
  abuse_score_raw = (weighted_total / weight_sum) * 100
267
+ if weapon_flag:
268
+ abuse_score_raw = min(abuse_score_raw + 25, 100) # boost intensity
269
+ if weapon_flag and stage < 2:
270
+ stage = 2
271
 
272
  if "threat" in threshold_labels or "control" in threshold_labels or "insults" in threshold_labels:
273
  abuse_score = min(abuse_score_raw, 100)