SamanthaStorm committed (verified)
Commit 7f5cc04 · 1 Parent(s): 6481f86

Update app.py

Files changed (1)
  1. app.py +14 -1
app.py CHANGED
@@ -200,7 +200,20 @@ def analyze_single_message(text):
     inp = tokenizer(text, return_tensors='pt', truncation=True, padding=True)
     with torch.no_grad(): logits=model(**inp).logits.squeeze(0)
     probs=torch.sigmoid(logits).numpy()
-    labels=[lab for lab,p in zip(LABELS,probs) if p>THRESHOLDS[lab]]
+    # …run tokenizer, get `probs` and then:
+    labels = [lab for lab,p in zip(LABELS, probs) if p > THRESHOLDS[lab]]
+
+    # **NEW**: if absolutely no pattern is detected, force a zero‐abuse “healthy” return:
+    if not labels:
+        return {
+            "abuse_score": 0,
+            "labels": [],
+            "sentiment": "supportive",
+            "stage": 4,
+            "darvo_score": 0.0,
+            "top_patterns": []
+        }
+
     # abuse score
     total_w=sum(PATTERN_WEIGHTS.get(l,1.0) for l in LABELS)
     abuse_score=int(round(sum(probs[i]*PATTERN_WEIGHTS.get(l,1.0)
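For reference, here is a minimal, runnable sketch of the control flow this hunk introduces: per-label sigmoid thresholding followed by the new early "healthy" return. The `LABELS` and `THRESHOLDS` values below are hypothetical stand-ins (the real ones, along with the model and `PATTERN_WEIGHTS`, are defined elsewhere in app.py), and the post-guard scoring is stubbed out where the diff truncates.

```python
import torch

# Hypothetical stand-ins; the real LABELS/THRESHOLDS live elsewhere in app.py.
LABELS = ["gaslighting", "blame shifting", "contempt"]
THRESHOLDS = {"gaslighting": 0.30, "blame shifting": 0.30, "contempt": 0.25}

def classify(logits: torch.Tensor) -> dict:
    # Multi-label head: independent sigmoid per logit, then a per-label threshold.
    probs = torch.sigmoid(logits).numpy()
    labels = [lab for lab, p in zip(LABELS, probs) if p > THRESHOLDS[lab]]

    # The patched early exit: no pattern crossed its threshold, so return the
    # fixed zero-abuse "healthy" result instead of scoring an empty label set.
    if not labels:
        return {"abuse_score": 0, "labels": [], "sentiment": "supportive",
                "stage": 4, "darvo_score": 0.0, "top_patterns": []}

    # The real function continues into the weighted abuse-score computation
    # (truncated in the diff above); stubbed here.
    return {"labels": labels}

print(classify(torch.tensor([-3.0, -2.5, -4.0])))  # every prob below threshold -> healthy dict
```

Without the guard, a message whose probabilities all sit below their thresholds would still fall through to the weighted sum over `probs`, which can yield a nonzero `abuse_score` for benign input; the early return closes that path.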