Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,13 +1,15 @@
|
|
1 |
import gradio as gr
|
2 |
import torch
|
3 |
import numpy as np
|
4 |
-
from transformers import
|
5 |
from transformers import RobertaForSequenceClassification, RobertaTokenizer
|
6 |
|
7 |
-
# Load fine-tuned sentiment model
|
8 |
-
|
9 |
-
|
10 |
-
|
|
|
|
|
11 |
|
12 |
# Load abuse pattern model
|
13 |
model_name = "SamanthaStorm/abuse-pattern-detector-v2"
|
@@ -50,16 +52,6 @@ EXPLANATIONS = {
|
|
50 |
"obscure_formal": "Obscure/formal language manipulates through confusion or superiority."
|
51 |
}
|
52 |
|
53 |
-
def custom_sentiment(text):
    """Run the fine-tuned sentiment model on *text*.

    Returns a dict with the predicted ``label`` (looked up in the model
    config's ``id2label`` mapping) and its softmax ``score`` (a float).
    """
    encoded = sentiment_tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    # Inference only — gradients are not needed.
    with torch.no_grad():
        logits = sentiment_model(**encoded).logits
    probabilities = torch.nn.functional.softmax(logits, dim=1)
    best = torch.argmax(probabilities).item()
    return {
        "label": sentiment_model.config.id2label[best],
        "score": probabilities[0][best].item(),
    }
|
62 |
-
|
63 |
def calculate_abuse_level(scores, thresholds):
    """Mean of the scores that exceed their per-label threshold, as a 0-100 percentage.

    Labels are taken positionally from the module-level ``LABELS`` list;
    returns 0.0 when no score crosses its threshold.
    """
    triggered = [
        value
        for name, value in zip(LABELS, scores)
        if value > thresholds[name]
    ]
    if not triggered:
        return 0.0
    return round(np.mean(triggered) * 100, 2)
|
@@ -76,7 +68,7 @@ def analyze_messages(input_text, risk_flags):
|
|
76 |
if not input_text:
|
77 |
return "Please enter a message for analysis."
|
78 |
|
79 |
-
sentiment = custom_sentiment(input_text)
|
80 |
sentiment_label = sentiment['label']
|
81 |
sentiment_score = sentiment['score']
|
82 |
|
@@ -113,7 +105,7 @@ def analyze_messages(input_text, risk_flags):
|
|
113 |
top_patterns = sorted(scored_patterns, key=lambda x: x[1], reverse=True)[:2]
|
114 |
|
115 |
top_pattern_explanations = "\n".join([
|
116 |
-
f"
|
117 |
for label, _ in top_patterns
|
118 |
])
|
119 |
|
|
|
1 |
# Third-party dependencies: Gradio UI, PyTorch runtime, NumPy, HF transformers.
import gradio as gr
import torch
import numpy as np
from transformers import RobertaForSequenceClassification, RobertaTokenizer, pipeline

# Load fine-tuned sentiment model from Hugging Face.
# NOTE(review): this downloads/loads model weights at import time
# (network + disk I/O), which is the usual pattern for a Spaces app.
sentiment_analyzer = pipeline(
    "sentiment-analysis",
    model="SamanthaStorm/Tether",
    tokenizer="SamanthaStorm/Tether",
)

# Load abuse pattern model
model_name = "SamanthaStorm/abuse-pattern-detector-v2"
|
|
|
52 |
"obscure_formal": "Obscure/formal language manipulates through confusion or superiority."
|
53 |
}
|
54 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
55 |
def calculate_abuse_level(scores, thresholds, labels=None):
    """Aggregate per-label scores into a single 0-100 abuse percentage.

    A label "triggers" when its score strictly exceeds its threshold; the
    result is the mean of the triggered scores scaled to a percentage and
    rounded to two decimals, or 0.0 when nothing triggers.

    Args:
        scores: iterable of floats, positionally aligned with *labels*.
        thresholds: mapping of label name -> trigger threshold.
        labels: label names matching *scores*; defaults to the module-level
            ``LABELS`` list, so existing two-argument callers are unchanged.

    Returns:
        float: mean triggered score as a percentage (0.0 if none triggered).
    """
    if labels is None:
        labels = LABELS
    triggered_scores = [
        score for label, score in zip(labels, scores) if score > thresholds[label]
    ]
    return round(np.mean(triggered_scores) * 100, 2) if triggered_scores else 0.0
|
|
|
68 |
if not input_text:
|
69 |
return "Please enter a message for analysis."
|
70 |
|
71 |
+
sentiment = sentiment_analyzer(input_text)[0]
|
72 |
sentiment_label = sentiment['label']
|
73 |
sentiment_score = sentiment['score']
|
74 |
|
|
|
105 |
top_patterns = sorted(scored_patterns, key=lambda x: x[1], reverse=True)[:2]
|
106 |
|
107 |
top_pattern_explanations = "\n".join([
|
108 |
+
f"• {label.replace('_', ' ').title()}: {EXPLANATIONS.get(label, 'No explanation available.')}"
|
109 |
for label, _ in top_patterns
|
110 |
])
|
111 |
|