Update app.py
app.py CHANGED
@@ -144,20 +144,28 @@ def analyze_single_message(text, thresholds, motif_flags):
     motif_hits, matched_phrases = detect_motifs(text)
     sentiment = custom_sentiment(text)
     sentiment_score = sentiment["score"] if sentiment["label"] == "undermining" else 0.0
+
+    # TEMP: print sentiment to console for debugging
+    print(f"Sentiment label: {sentiment['label']}, score: {sentiment['score']}")
+
     adjusted_thresholds = {k: v * 0.8 for k, v in thresholds.items()} if sentiment['label'] == "undermining" else thresholds.copy()
+
     inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
     with torch.no_grad():
         outputs = model(**inputs)
     scores = torch.sigmoid(outputs.logits.squeeze(0)).numpy()
+
     threshold_labels = [label for label, score in zip(LABELS, scores) if score > adjusted_thresholds[label]]
     phrase_labels = [label for label, _ in matched_phrases]
     pattern_labels_used = list(set(threshold_labels + phrase_labels))
+
     abuse_level = calculate_abuse_level(scores, adjusted_thresholds, motif_hits)
     top_patterns = sorted([(label, score) for label, score in zip(LABELS, scores)], key=lambda x: x[1], reverse=True)[:2]
     motif_phrases = [text for _, text in matched_phrases]
     contradiction_flag = detect_contradiction(text)
     darvo_score = calculate_darvo_score(pattern_labels_used, 0.0, sentiment_score, motif_phrases, contradiction_flag)
-
+
+    return abuse_level, pattern_labels_used, top_patterns, darvo_score, sentiment

 def analyze_composite(msg1, msg2, msg3, flags):
     thresholds = THRESHOLDS
@@ -165,7 +173,13 @@ def analyze_composite(msg1, msg2, msg3, flags):
     active_messages = [m for m in messages if m.strip()]
     if not active_messages:
         return "Please enter at least one message."
-
+
+    results = []
+    for m in active_messages:
+        result = analyze_single_message(m, thresholds, flags)
+        print(f"Message: {m}")
+        print(f"Sentiment result: {result[4]}")
+        results.append(result)
     abuse_scores = [r[0] for r in results]
     darvo_scores = [r[3] for r in results]
     average_darvo = round(sum(darvo_scores) / len(darvo_scores), 3)
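
A note on the sensitization step the diff keeps at new line 151: when custom_sentiment labels the text "undermining", every per-label threshold is scaled by 0.8, so borderline pattern scores clear the bar more easily. A minimal sketch of that expression in isolation; the label names and threshold values here are illustrative, not the real THRESHOLDS from app.py:

# Illustrative values only; app.py defines the real THRESHOLDS mapping.
thresholds = {"gaslighting": 0.5, "projection": 0.25}
sentiment = {"label": "undermining", "score": 0.82}

# Same expression as the diff: scale every per-label threshold by 0.8 for
# "undermining" text, otherwise work on an unmodified copy.
adjusted_thresholds = (
    {k: v * 0.8 for k, v in thresholds.items()}
    if sentiment["label"] == "undermining"
    else thresholds.copy()
)
print(adjusted_thresholds)  # {'gaslighting': 0.4, 'projection': 0.2}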
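
For readers tracing the r[0], r[3], and result[4] indexing in analyze_composite: each entry in results is the five-tuple produced by the new return statement in analyze_single_message. A sketch with made-up scores and labels, purely to show the positions:

# Tuple layout, per the new return in analyze_single_message:
# (abuse_level, pattern_labels_used, top_patterns, darvo_score, sentiment)
results = [
    (72.5, ["gaslighting"], [("gaslighting", 0.91), ("projection", 0.64)],
     0.41, {"label": "undermining", "score": 0.82}),
    (10.0, [], [("projection", 0.22), ("gaslighting", 0.18)],
     0.05, {"label": "neutral", "score": 0.10}),
]

abuse_scores = [r[0] for r in results]   # -> [72.5, 10.0]
darvo_scores = [r[3] for r in results]   # -> [0.41, 0.05]
average_darvo = round(sum(darvo_scores) / len(darvo_scores), 3)
print(average_darvo)  # 0.23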
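
The commit ships its diagnostics as TEMP print calls. If they outlive debugging, one option is routing the same output through the standard logging module, a sketch of which follows; log_sentiment and the logger name are hypothetical, not part of app.py:

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("abuse_analyzer")  # name is illustrative

def log_sentiment(sentiment: dict) -> None:
    # Emits the same fields as the TEMP print, but can be silenced by
    # raising the log level instead of editing the code.
    logger.debug("Sentiment label: %s, score: %s",
                 sentiment["label"], sentiment["score"])

# Mirrors the dict shape custom_sentiment returns in app.py.
log_sentiment({"label": "undermining", "score": 0.82})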