SamanthaStorm committed (verified)
Commit 3466798 · 1 Parent(s): f50e11c

Update app.py

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -12,13 +12,13 @@ model_name = "SamanthaStorm/abuse-pattern-detector-v2"
 model = RobertaForSequenceClassification.from_pretrained(model_name, trust_remote_code=True)
 tokenizer = RobertaTokenizer.from_pretrained(model_name, trust_remote_code=True)
 
-# Define labels (17 total)
+# Define labels (18 total)
 LABELS = [
     "gaslighting", "mockery", "dismissiveness", "control",
     "guilt_tripping", "apology_baiting", "blame_shifting", "projection",
     "contradictory_statements", "manipulation", "deflection", "insults",
-    "obscure_formal", "recovery_phase", "suicidal_threat", "physical_threat",
-    "extreme_control", "non_abusive"
+    "obscure_formal", "recovery_phase", "non_abusive", "suicidal_threat", "physical_threat",
+    "extreme_control"
 ]
 
 # Custom thresholds for each label (make sure these match your original settings)
@@ -44,7 +44,7 @@ THRESHOLDS = {
 
 }
 
-# Define label groups using slicing (first 14: abuse patterns, last 3: danger cues)
+# Define label groups using slicing (first 15: abuse patterns, last 3: danger cues)
 PATTERN_LABELS = LABELS[:15]
 DANGER_LABELS = LABELS[15:18]
 
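
The reorder is what makes the slicing comments accurate: "non_abusive" now sits at index 14, so the first 15 entries are the abuse-pattern labels and the last 3 are the danger cues. A minimal sketch (not part of the commit, just the list and slices copied from the diff above) showing what the groups resolve to after this change:

LABELS = [
    "gaslighting", "mockery", "dismissiveness", "control",
    "guilt_tripping", "apology_baiting", "blame_shifting", "projection",
    "contradictory_statements", "manipulation", "deflection", "insults",
    "obscure_formal", "recovery_phase", "non_abusive", "suicidal_threat", "physical_threat",
    "extreme_control"
]

PATTERN_LABELS = LABELS[:15]   # 15 abuse-pattern labels, now ending with "non_abusive"
DANGER_LABELS = LABELS[15:18]  # ['suicidal_threat', 'physical_threat', 'extreme_control']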