SamanthaStorm committed
Commit d23b4c6 · verified · 1 Parent(s): 479c580

Update app.py

Files changed (1):
  1. app.py  +6 -6
app.py CHANGED

@@ -28,7 +28,7 @@ THRESHOLDS = {
     "gaslighting": 0.15,
     "mockery": 0.15,
     "dismissiveness": 0.15,
-    "control": 0.15,
+    "control": 0.08,
     "guilt_tripping": 0.15,
     "apology_baiting": 0.15,
     "blame_shifting": 0.15,
@@ -39,9 +39,9 @@ THRESHOLDS = {
     "insults": 0.15,
     "obscure_formal": 0.15,
     "recovery_phase": 0.15,
-    "suicidal_threat": 0.10,
-    "physical_threat": 0.10,
-    "extreme_control": 0.10
+    "suicidal_threat": 0.30,
+    "physical_threat": 0.30,
+    "extreme_control": 0.02
 }
 def analyze_messages(input_text):
     input_text = input_text.strip()
@@ -82,8 +82,8 @@ def analyze_messages(input_text):
     )
     # Treat high-scoring danger cues as abuse patterns as well
     for danger_label in ["suicidal_threat", "physical_threat", "extreme_control"]:
-    if scores[LABELS.index(danger_label)] > THRESHOLDS[danger_label]:
-    pattern_count += 1
+        if scores[LABELS.index(danger_label)] > THRESHOLDS[danger_label]:
+            pattern_count += 1
     # Set resources
     if danger_assessment == "High":
         resources = (
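
The last hunk is a whitespace-only fix (the diff view strips leading indentation, so the exact spacing shown above is inferred): the if check and the counter increment now sit inside the for loop, so each high-scoring danger cue is tested and counted. A minimal runnable sketch of that loop, assuming LABELS is the list of pattern names and scores is a parallel list of model probabilities (both are defined elsewhere in app.py and not shown in this diff; the values below are invented for illustration):

    # Sketch only: LABELS and scores stand in for the real ones in app.py.
    # scores[i] is assumed to be the model's probability for LABELS[i].
    LABELS = ["recovery_phase", "suicidal_threat", "physical_threat", "extreme_control"]
    scores = [0.05, 0.35, 0.12, 0.03]  # hypothetical classifier outputs

    THRESHOLDS = {
        "suicidal_threat": 0.30,
        "physical_threat": 0.30,
        "extreme_control": 0.02,
    }

    pattern_count = 0
    # With the corrected indentation, the check and the increment run once per label.
    for danger_label in ["suicidal_threat", "physical_threat", "extreme_control"]:
        if scores[LABELS.index(danger_label)] > THRESHOLDS[danger_label]:
            pattern_count += 1

    print(pattern_count)  # 2: suicidal_threat (0.35 > 0.30) and extreme_control (0.03 > 0.02)

The threshold edits move sensitivity in both directions: "control" (0.15 to 0.08) and "extreme_control" (0.10 to 0.02) now fire on weaker signals, while "suicidal_threat" and "physical_threat" (0.10 to 0.30) require stronger evidence before they count as abuse patterns.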