SamanthaStorm committed · Commit c127ba6 · verified · 1 Parent(s): 90d7e35

Update app.py

Files changed (1):
  1. app.py +12 -11
app.py CHANGED
@@ -342,14 +342,20 @@ def compute_abuse_score(matched_scores, sentiment):
 def analyze_single_message(text, thresholds):
     motif_hits, matched_phrases = detect_motifs(text)
 
-    # Derive sentiment from emotion intensity
+    # Get emotion profile
     emotion_profile = get_emotion_profile(text)
     sentiment_score = emotion_profile.get("anger", 0) + emotion_profile.get("disgust", 0)
 
-    # Override if neutral tone masks abusive content
+    # Get model scores first so they can be used in the neutral override
+    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
+    with torch.no_grad():
+        outputs = model(**inputs)
+    scores = torch.sigmoid(outputs.logits.squeeze(0)).numpy()
+
+    # Sentiment override if neutral masks abuse
     if emotion_profile.get("neutral", 0) > 0.85 and any(
-        scores[label] > THRESHOLDS[label]
-        for label in ["control", "threat", "blame shifting"]
+        scores[label_idx] > thresholds[LABELS[label_idx]]
+        for label_idx in [LABELS.index(l) for l in ["control", "threat", "blame shifting"]]
     ):
         sentiment = "undermining"
     else:
@@ -364,11 +370,6 @@ def analyze_single_message(text, thresholds):
 
     contradiction_flag = detect_contradiction(text)
 
-    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
-    with torch.no_grad():
-        outputs = model(**inputs)
-    scores = torch.sigmoid(outputs.logits.squeeze(0)).numpy()
-
     threshold_labels = [
         label for label, score in zip(LABELS, scores)
         if score > adjusted_thresholds[label]
@@ -408,11 +409,11 @@ def analyze_single_message(text, thresholds):
 
     abuse_score = min(abuse_score_raw, 100 if "threat" in threshold_labels or "control" in threshold_labels else 95)
 
-    # Now that abuse_score is ready → get tone tag
+    # Get tone tag
     tone_tag = get_emotional_tone_tag(emotion_profile, sentiment, threshold_labels, abuse_score)
     print(f"Emotional Tone Tag: {tone_tag}")
 
-    # Debug logging
+    # Debug logs
     print("Emotion Profile:")
     for emotion, score in emotion_profile.items():
         print(f" {emotion.capitalize():10}: {score}")