Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -52,32 +52,32 @@ EXPLANATIONS = {
|
|
52 |
}
|
53 |
|
54 |
def custom_sentiment(text):
    """Classify *text* with the custom sentiment model.

    Returns a dict with keys "label" (either "supportive" or "undermining")
    and "score" (the softmax confidence of the chosen label as a float).
    """
    # Tokenize and run the model in inference mode (no gradient tracking).
    encoded = sentiment_tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        logits = sentiment_model(**encoded).logits

    # Convert logits to class probabilities and pick the most likely class.
    probabilities = torch.nn.functional.softmax(logits, dim=1)
    best_idx = torch.argmax(probabilities).item()

    # Index-to-name mapping; presumably matches the model's training label
    # order — TODO confirm against the model card.
    names = {0: "supportive", 1: "undermining"}
    return {
        "label": names[best_idx],
        "score": probabilities[0][best_idx].item(),
    }
|
65 |
|
66 |
def calculate_abuse_level(scores, thresholds):
    """Return the mean of threshold-exceeding scores as a 0-100 percentage.

    ``scores`` is aligned positionally with the module-level ``LABELS``;
    ``thresholds`` maps each label to its trigger threshold. When no score
    crosses its threshold, returns 0.0.
    """
    triggered = [s for lbl, s in zip(LABELS, scores) if s > thresholds[lbl]]
    if not triggered:
        return 0.0
    return round(np.mean(triggered) * 100, 2)
|
69 |
|
70 |
def interpret_abuse_level(score):
    """Map a 0-100 abuse score onto a human-readable risk band.

    Thresholds are exclusive: e.g. a score of exactly 80 falls into the
    "Severe" band, not "Extreme".
    """
    bands = (
        (80, "Extreme / High Risk"),
        (60, "Severe / Harmful Pattern Present"),
        (40, "Likely Abuse"),
        (20, "Mild Concern"),
    )
    for threshold, label in bands:
        if score > threshold:
            return label
    return "Very Low / Likely Safe"
|
76 |
|
77 |
def analyze_messages(input_text, risk_flags):
|
78 |
-
input_text = input_text.strip()
|
79 |
-
if not input_text:
|
80 |
-
return "Please enter a message for analysis."
|
81 |
|
82 |
sentiment = custom_sentiment(input_text)
|
83 |
sentiment_label = sentiment['label']
|
@@ -151,4 +151,4 @@ title="Abuse Pattern Detector"
|
|
151 |
)
|
152 |
|
153 |
# Script entry point. The diff page's rendering stripped the dunder
# underscores ("if name == "main":" is a NameError as written); restore the
# standard guard so the file runs as a script.
if __name__ == "__main__":
    # Enable Gradio's request queue before launching the UI.
    iface.queue().launch()
|
|
|
52 |
}
|
53 |
|
54 |
def custom_sentiment(text):
    """Run the custom sentiment classifier on *text*.

    Returns ``{"label": <"supportive" | "undermining">, "score": <float>}``
    where ``score`` is the softmax probability of the winning class.
    """
    model_inputs = sentiment_tokenizer(
        text, return_tensors="pt", truncation=True, padding=True
    )
    # Inference only — disable autograd bookkeeping.
    with torch.no_grad():
        result = sentiment_model(**model_inputs)

    probs = torch.nn.functional.softmax(result.logits, dim=1)
    winner = torch.argmax(probs).item()
    # 0/1 mapping as hard-coded by the app; presumably mirrors the model's
    # training labels — TODO confirm.
    label = {0: "supportive", 1: "undermining"}[winner]
    return {"label": label, "score": probs[0][winner].item()}
|
65 |
|
66 |
def calculate_abuse_level(scores, thresholds):
    """Percentage (0-100) abuse level: mean of the scores above threshold.

    Pairs each score with its label from the module-level ``LABELS`` list,
    keeps only those strictly above their per-label threshold, and averages
    them. Returns 0.0 if nothing triggered.
    """
    above = [
        value
        for label, value in zip(LABELS, scores)
        if value > thresholds[label]
    ]
    return round(np.mean(above) * 100, 2) if above else 0.0
|
69 |
|
70 |
def interpret_abuse_level(score):
    """Translate a numeric abuse score (0-100) into a risk description.

    Band boundaries are exclusive at 80 / 60 / 40 / 20.
    """
    if score > 80:
        return "Extreme / High Risk"
    if score > 60:
        return "Severe / Harmful Pattern Present"
    if score > 40:
        return "Likely Abuse"
    if score > 20:
        return "Mild Concern"
    return "Very Low / Likely Safe"
|
76 |
|
77 |
def analyze_messages(input_text, risk_flags):
|
78 |
+
input_text = input_text.strip()
|
79 |
+
if not input_text:
|
80 |
+
return "Please enter a message for analysis."
|
81 |
|
82 |
sentiment = custom_sentiment(input_text)
|
83 |
sentiment_label = sentiment['label']
|
|
|
151 |
)
|
152 |
|
153 |
# Entry-point guard. The web diff rendering consumed the dunder underscores
# around "name" and "main"; as scraped this would raise NameError. Restore
# the canonical Python idiom.
if __name__ == "__main__":
    # queue() turns on Gradio's request queue, then launch() starts the app.
    iface.queue().launch()
|