Update app.py
app.py CHANGED
@@ -4,14 +4,15 @@ import numpy as np
 from transformers import AutoModelForSequenceClassification, AutoTokenizer
 from transformers import RobertaForSequenceClassification, RobertaTokenizer
 
-# Load fine-tuned sentiment model
+# Load fine-tuned sentiment model (DistilBERT)
+sentiment_model_name = "SamanthaStorm/tether-sentiment"
+sentiment_model = AutoModelForSequenceClassification.from_pretrained(sentiment_model_name)
+sentiment_tokenizer = AutoTokenizer.from_pretrained(sentiment_model_name)
 
-# Load abuse pattern model
+# Load abuse pattern model (RoBERTa)
+abuse_model_name = "SamanthaStorm/abuse-pattern-detector-v2"
+abuse_model = RobertaForSequenceClassification.from_pretrained(abuse_model_name)
+abuse_tokenizer = RobertaTokenizer.from_pretrained(abuse_model_name)
 
 LABELS = [
     "gaslighting", "mockery", "dismissiveness", "control", "guilt_tripping", "apology_baiting", "blame_shifting", "projection",
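The two checkpoints loaded above split the work: the DistilBERT head supplies the sentiment_label and sentiment_score used later in analyze_messages, and the RoBERTa head scores the abuse patterns. The sentiment inference itself is outside this hunk; a minimal sketch of what that step presumably looks like, assuming a two-class head whose label mapping is read from the checkpoint's config rather than hard-coded:

import torch

def get_sentiment(text):
    # Run the fine-tuned sentiment model loaded above on one message.
    enc = sentiment_tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        logits = sentiment_model(**enc).logits
    probs = torch.softmax(logits, dim=-1).squeeze(0)
    idx = int(torch.argmax(probs))
    # id2label maps class index to "NEGATIVE"/"POSITIVE" for a typical two-class head.
    return sentiment_model.config.id2label[idx], float(probs[idx])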
@@ -81,9 +82,9 @@ def analyze_messages(input_text, risk_flags):
 
     adjusted_thresholds = {k: v * 0.8 for k, v in THRESHOLDS.items()} if sentiment_label == "NEGATIVE" else THRESHOLDS.copy()
 
-    inputs =
+    inputs = abuse_tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)
     with torch.no_grad():
-        outputs =
+        outputs = abuse_model(**inputs)
     scores = torch.sigmoid(outputs.logits.squeeze(0)).numpy()
 
     pattern_count = sum(score > adjusted_thresholds[label] for label, score in zip(PATTERN_LABELS, scores[:15]))
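This hunk rewires inference to the dedicated abuse model. The head is treated as multi-label: each of the first 15 logits is squashed independently through a sigmoid (not a softmax), and a pattern counts as detected when its score clears a per-label threshold, tightened to 80% whenever the sentiment pass came back NEGATIVE. A self-contained sketch of that counting logic; the label names come from LABELS above, but the threshold values here are made up, since THRESHOLDS is defined elsewhere in app.py:

import numpy as np

PATTERN_LABELS = ["gaslighting", "mockery", "dismissiveness"]  # truncated for the example
THRESHOLDS = {"gaslighting": 0.30, "mockery": 0.25, "dismissiveness": 0.45}  # hypothetical values

def count_patterns(scores, sentiment_label):
    # NEGATIVE sentiment scales every threshold down to 80%, making detection more sensitive.
    t = {k: v * 0.8 for k, v in THRESHOLDS.items()} if sentiment_label == "NEGATIVE" else THRESHOLDS.copy()
    return sum(score > t[label] for label, score in zip(PATTERN_LABELS, scores))

print(count_patterns(np.array([0.28, 0.10, 0.50]), "NEGATIVE"))  # 2: gaslighting and dismissiveness clear their lowered bars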
@@ -120,7 +121,6 @@ def analyze_messages(input_text, risk_flags):
         f"Abuse Risk Score: {abuse_level}% – {abuse_description}\n\n"
         f"Most Likely Patterns:\n{top_pattern_explanations}\n\n"
         f"⚠️ Critical Danger Flags Detected: {danger_flag_count} of 3\n"
-        "The Danger Assessment is a validated tool that helps identify serious risk in intimate partner violence.\n\n"
         f"Resources: {resources}\n\n"
         f"Sentiment: {sentiment_label} (Confidence: {sentiment_score*100:.2f}%)"
     )
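One reviewer note on this deletion: the report is built from adjacent string literals inside a single parenthesized expression, so dropping the Danger Assessment sentence needs no other change; Python fuses whatever literals remain into one string at compile time. A tiny illustration with hypothetical values:

abuse_level, danger_flag_count = 72, 2  # hypothetical values
report = (
    f"Abuse Risk Score: {abuse_level}%\n\n"
    f"⚠️ Critical Danger Flags Detected: {danger_flag_count} of 3\n"
)
print(report)  # the two adjacent literals concatenate implicitly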