SamanthaStorm committed on
Commit 068dda5 · verified
1 Parent(s): 068f7ad

Update app.py

Files changed (1)
  1. app.py +120 -108
app.py CHANGED
@@ -4,152 +4,164 @@ import numpy as np
 from transformers import AutoModelForSequenceClassification, AutoTokenizer
 from transformers import RobertaForSequenceClassification, RobertaTokenizer
 
-# Load custom fine-tuned sentiment model
-sentiment_model = AutoModelForSequenceClassification.from_pretrained("SamanthaStorm/tether-sentiment")
-sentiment_tokenizer = AutoTokenizer.from_pretrained("SamanthaStorm/tether-sentiment")
 
-# Load abuse pattern model
-model_name = "SamanthaStorm/abuse-pattern-detector-v2"
 model = RobertaForSequenceClassification.from_pretrained(model_name, trust_remote_code=True)
 tokenizer = RobertaTokenizer.from_pretrained(model_name, trust_remote_code=True)
 
 LABELS = [
-    "gaslighting", "mockery", "dismissiveness", "control", "guilt_tripping", "apology_baiting", "blame_shifting", "projection",
-    "contradictory_statements", "manipulation", "deflection", "insults", "obscure_formal", "recovery_phase", "non_abusive",
-    "suicidal_threat", "physical_threat", "extreme_control"
 ]
 
 THRESHOLDS = {
-    "gaslighting": 0.25, "mockery": 0.15, "dismissiveness": 0.30, "control": 0.43, "guilt_tripping": 0.19,
-    "apology_baiting": 0.45, "blame_shifting": 0.23, "projection": 0.50, "contradictory_statements": 0.25,
-    "manipulation": 0.25, "deflection": 0.30, "insults": 0.34, "obscure_formal": 0.25, "recovery_phase": 0.25,
-    "non_abusive": 2.0, "suicidal_threat": 0.45, "physical_threat": 0.02, "extreme_control": 0.36
 }
 
 PATTERN_LABELS = LABELS[:15]
 DANGER_LABELS = LABELS[15:18]
 
 EXPLANATIONS = {
-    "gaslighting": "Gaslighting involves making someone question their own reality or perceptions...",
-    "blame_shifting": "Blame-shifting is when one person redirects the responsibility...",
-    "projection": "Projection involves accusing the victim of behaviors the abuser exhibits.",
-    "dismissiveness": "Dismissiveness is belittling or disregarding another person’s feelings.",
-    "mockery": "Mockery ridicules someone in a hurtful, humiliating way.",
-    "recovery_phase": "Recovery phase dismisses someone's emotional healing process.",
-    "insults": "Insults are derogatory remarks aimed at degrading someone.",
-    "apology_baiting": "Apology-baiting manipulates victims into apologizing for abuser's behavior.",
-    "deflection": "Deflection avoids accountability by redirecting blame.",
-    "control": "Control restricts autonomy through manipulation or coercion.",
-    "extreme_control": "Extreme control dominates decisions and behaviors entirely.",
-    "physical_threat": "Physical threats signal risk of bodily harm.",
-    "suicidal_threat": "Suicidal threats manipulate others using self-harm threats.",
-    "guilt_tripping": "Guilt-tripping uses guilt to manipulate someone’s actions.",
-    "manipulation": "Manipulation deceives to influence or control outcomes.",
-    "non_abusive": "Non-abusive language is respectful and free of coercion.",
-    "obscure_formal": "Obscure/formal language manipulates through confusion or superiority."
 }
 
 def custom_sentiment(text):
-    inputs = sentiment_tokenizer(text, return_tensors="pt", truncation=True, padding=True)
-    with torch.no_grad():
-        outputs = sentiment_model(**inputs)
-    probs = torch.nn.functional.softmax(outputs.logits, dim=1)
-    label_idx = torch.argmax(probs).item()
-
-    # Map index to custom label
-    label_map = {0: "supportive", 1: "undermining"}
-    label = label_map[label_idx]
-
-    score = probs[0][label_idx].item()
-    return {"label": label, "score": score}
 
 def calculate_abuse_level(scores, thresholds):
-    triggered_scores = [score for label, score in zip(LABELS, scores) if score > thresholds[label]]
-    return round(np.mean(triggered_scores) * 100, 2) if triggered_scores else 0.0
 
 def interpret_abuse_level(score):
-    if score > 80: return "Extreme / High Risk"
-    elif score > 60: return "Severe / Harmful Pattern Present"
-    elif score > 40: return "Likely Abuse"
-    elif score > 20: return "Mild Concern"
-    return "Very Low / Likely Safe"
 
 def analyze_messages(input_text, risk_flags):
-    input_text = input_text.strip()
-    if not input_text:
-        return "Please enter a message for analysis."
-
-    sentiment = custom_sentiment(input_text)
-    sentiment_label = sentiment['label']
-    sentiment_score = sentiment['score']
-
-    adjusted_thresholds = {k: v * 0.8 for k, v in THRESHOLDS.items()} if sentiment_label == "undermining" else THRESHOLDS.copy()
-
-    inputs = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)
-    with torch.no_grad():
-        outputs = model(**inputs)
-    scores = torch.sigmoid(outputs.logits.squeeze(0)).numpy()
-
-    pattern_count = sum(score > adjusted_thresholds[label] for label, score in zip(PATTERN_LABELS, scores[:15]))
-    danger_flag_count = sum(score > adjusted_thresholds[label] for label, score in zip(DANGER_LABELS, scores[15:18]))
-
-    contextual_flags = risk_flags if risk_flags else []
-    if len(contextual_flags) >= 2:
-        danger_flag_count += 1
-
-    critical_flags = ["They've threatened harm", "They monitor/follow me", "I feel unsafe when alone with them"]
-    high_risk_context = any(flag in contextual_flags for flag in critical_flags)
-
-    non_abusive_score = scores[LABELS.index('non_abusive')]
-    if non_abusive_score > adjusted_thresholds['non_abusive']:
-        return "This message is classified as non-abusive."
-
-    abuse_level = calculate_abuse_level(scores, adjusted_thresholds)
-    abuse_description = interpret_abuse_level(abuse_level)
-
-    if danger_flag_count >= 2:
-        resources = "Immediate assistance recommended. Please seek professional help or contact emergency services."
-    else:
-        resources = "For more information on abuse patterns, consider reaching out to support groups or professional counselors."
-
-    scored_patterns = [
-        (label, score) for label, score in zip(PATTERN_LABELS, scores[:15])
-        if label != "non_abusive"
-    ]
-    top_patterns = sorted(scored_patterns, key=lambda x: x[1], reverse=True)[:2]
-
-    top_pattern_explanations = "\n".join([
-        f"\u2022 {label.replace('_', ' ').title()}: {EXPLANATIONS.get(label, 'No explanation available.')}"
-        for label, _ in top_patterns
-    ])
-
-    result = (
-        f"Abuse Risk Score: {abuse_level}% – {abuse_description}\n\n"
-        f"Most Likely Patterns:\n{top_pattern_explanations}\n\n"
-        f"⚠️ Critical Danger Flags Detected: {danger_flag_count} of 3\n"
-        "Resources: " + resources + "\n\n"
-        f"Sentiment: {sentiment_label.title()} (Confidence: {sentiment_score*100:.2f}%)"
-    )
-
-    if contextual_flags:
-        result += "\n\n⚠️ You indicated the following:\n" + "\n".join([f"• {flag}" for flag in contextual_flags])
-    if high_risk_context:
-        result += "\n\n🚨 These responses suggest a high-risk situation. Consider seeking immediate help or safety planning resources."
-
-    return result
 
 iface = gr.Interface(
-    fn=analyze_messages,
-    inputs=[
-        gr.Textbox(lines=10, placeholder="Enter message here..."),
-        gr.CheckboxGroup(label="Do any of these apply to your situation?", choices=[
-            "They've threatened harm", "They isolate me", "I’ve changed my behavior out of fear",
-            "They monitor/follow me", "I feel unsafe when alone with them"
-        ])
-    ],
-    outputs=[gr.Textbox(label="Analysis Result")],
-    title="Abuse Pattern Detector"
 )
 
-if __name__ == "__main__":
-    iface.launch()
 
 from transformers import AutoModelForSequenceClassification, AutoTokenizer
 from transformers import RobertaForSequenceClassification, RobertaTokenizer
 
+# Load custom fine-tuned sentiment model
+sentiment_model = AutoModelForSequenceClassification.from_pretrained("SamanthaStorm/tether-sentiment")
+sentiment_tokenizer = AutoTokenizer.from_pretrained("SamanthaStorm/tether-sentiment")
+
+# Load abuse pattern model
+model_name = "SamanthaStorm/abuse-pattern-detector-v2"
 model = RobertaForSequenceClassification.from_pretrained(model_name, trust_remote_code=True)
 tokenizer = RobertaTokenizer.from_pretrained(model_name, trust_remote_code=True)
 
 LABELS = [
+    "gaslighting", "mockery", "dismissiveness", "control", "guilt_tripping", "apology_baiting", "blame_shifting", "projection",
+    "contradictory_statements", "manipulation", "deflection", "insults", "obscure_formal", "recovery_phase", "non_abusive",
+    "suicidal_threat", "physical_threat", "extreme_control"
 ]
 
 THRESHOLDS = {
+    "gaslighting": 0.25, "mockery": 0.15, "dismissiveness": 0.30, "control": 0.43, "guilt_tripping": 0.19,
+    "apology_baiting": 0.45, "blame_shifting": 0.23, "projection": 0.50, "contradictory_statements": 0.25,
+    "manipulation": 0.25, "deflection": 0.30, "insults": 0.34, "obscure_formal": 0.25, "recovery_phase": 0.25,
+    "non_abusive": 2.0, "suicidal_threat": 0.45, "physical_threat": 0.02, "extreme_control": 0.36
 }
 
 PATTERN_LABELS = LABELS[:15]
 DANGER_LABELS = LABELS[15:18]
 
 EXPLANATIONS = {
+    "gaslighting": "Gaslighting involves making someone question their own reality or perceptions...",
+    "blame_shifting": "Blame-shifting is when one person redirects the responsibility...",
+    "projection": "Projection involves accusing the victim of behaviors the abuser exhibits.",
+    "dismissiveness": "Dismissiveness is belittling or disregarding another person’s feelings.",
+    "mockery": "Mockery ridicules someone in a hurtful, humiliating way.",
+    "recovery_phase": "Recovery phase dismisses someone's emotional healing process.",
+    "insults": "Insults are derogatory remarks aimed at degrading someone.",
+    "apology_baiting": "Apology-baiting manipulates victims into apologizing for abuser's behavior.",
+    "deflection": "Deflection avoids accountability by redirecting blame.",
+    "control": "Control restricts autonomy through manipulation or coercion.",
+    "extreme_control": "Extreme control dominates decisions and behaviors entirely.",
+    "physical_threat": "Physical threats signal risk of bodily harm.",
+    "suicidal_threat": "Suicidal threats manipulate others using self-harm threats.",
+    "guilt_tripping": "Guilt-tripping uses guilt to manipulate someone’s actions.",
+    "manipulation": "Manipulation deceives to influence or control outcomes.",
+    "non_abusive": "Non-abusive language is respectful and free of coercion.",
+    "obscure_formal": "Obscure/formal language manipulates through confusion or superiority."
 }
 
 def custom_sentiment(text):
+    inputs = sentiment_tokenizer(text, return_tensors="pt", truncation=True, padding=True)
+    with torch.no_grad():
+        outputs = sentiment_model(**inputs)
+    probs = torch.nn.functional.softmax(outputs.logits, dim=1)
+    label_idx = torch.argmax(probs).item()
+    label_map = {0: "supportive", 1: "undermining"}
+    label = label_map[label_idx]
+    score = probs[0][label_idx].item()
+    return {"label": label, "score": score}
 
 def calculate_abuse_level(scores, thresholds):
+    triggered_scores = [score for label, score in zip(LABELS, scores) if score > thresholds[label]]
+    return round(np.mean(triggered_scores) * 100, 2) if triggered_scores else 0.0
 
 def interpret_abuse_level(score):
+    if score > 80: return "Extreme / High Risk"
+    elif score > 60: return "Severe / Harmful Pattern Present"
+    elif score > 40: return "Likely Abuse"
+    elif score > 20: return "Mild Concern"
+    return "Very Low / Likely Safe"
 
 def analyze_messages(input_text, risk_flags):
+    input_text = input_text.strip()
+    if not input_text:
+        return "Please enter a message for analysis."
+
+    sentiment = custom_sentiment(input_text)
+    sentiment_label = sentiment['label']
+    sentiment_score = sentiment['score']
+
+    adjusted_thresholds = {k: v * 0.8 for k, v in THRESHOLDS.items()} if sentiment_label == "undermining" else THRESHOLDS.copy()
+
+    inputs = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)
+    with torch.no_grad():
+        outputs = model(**inputs)
+    scores = torch.sigmoid(outputs.logits.squeeze(0)).numpy()
+
+    pattern_count = sum(score > adjusted_thresholds[label] for label, score in zip(PATTERN_LABELS, scores[:15]))
+    danger_flag_count = sum(score > adjusted_thresholds[label] for label, score in zip(DANGER_LABELS, scores[15:18]))
+
+    contextual_flags = risk_flags if risk_flags else []
+    if len(contextual_flags) >= 2:
+        danger_flag_count += 1
+
+    critical_flags = ["They've threatened harm", "They monitor/follow me", "I feel unsafe when alone with them"]
+    high_risk_context = any(flag in contextual_flags for flag in critical_flags)
+
+    non_abusive_score = scores[LABELS.index('non_abusive')]
+    if non_abusive_score > adjusted_thresholds['non_abusive']:
+        return "This message is classified as non-abusive."
+
+    abuse_level = calculate_abuse_level(scores, adjusted_thresholds)
+    abuse_description = interpret_abuse_level(abuse_level)
+
+    if danger_flag_count >= 2:
+        resources = "Immediate assistance recommended. Please seek professional help or contact emergency services."
+    else:
+        resources = "For more information on abuse patterns, consider reaching out to support groups or professional counselors."
+
+    scored_patterns = [(label, scores[LABELS.index(label)]) for label in PATTERN_LABELS if label != "non_abusive"]
+    scored_dangers = [(label, scores[LABELS.index(label)]) for label in DANGER_LABELS]
+
+    top_patterns = sorted(
+        [(label, score) for label, score in scored_patterns if score > adjusted_thresholds[label]],
+        key=lambda x: x[1], reverse=True
+    )[:2]
+
+    top_dangers = sorted(
+        [(label, score) for label, score in scored_dangers if score > adjusted_thresholds[label]],
+        key=lambda x: x[1], reverse=True
+    )
+
+    highlighted_danger = top_dangers[0] if top_dangers else None
+
+    top_pattern_explanations = "\n".join([
+        f"\u2022 {label.replace('_', ' ').title()}: {EXPLANATIONS.get(label, 'No explanation available.')}"
+        for label, _ in top_patterns
+    ])
+
+    if highlighted_danger and highlighted_danger[0] not in [label for label, _ in top_patterns]:
+        danger_label = highlighted_danger[0]
+        top_pattern_explanations += f"\n\u2022 {danger_label.replace('_', ' ').title()}: {EXPLANATIONS.get(danger_label, 'No explanation available.')} (Danger Pattern)"
+
+    result = (
+        f"Abuse Risk Score: {abuse_level}% – {abuse_description}\n\n"
+        f"Most Likely Patterns:\n{top_pattern_explanations}\n\n"
+        f"⚠️ Critical Danger Flags Detected: {danger_flag_count} of 3\n"
+        "Resources: " + resources + "\n\n"
+        f"Sentiment: {sentiment_label.title()} (Confidence: {sentiment_score*100:.2f}%)"
+    )
+
+    if contextual_flags:
+        result += "\n\n⚠️ You indicated the following:\n" + "\n".join([f"• {flag}" for flag in contextual_flags])
+    if high_risk_context:
+        result += "\n\n🚨 These responses suggest a high-risk situation. Consider seeking immediate help or safety planning resources."
+
+    return result
 
 iface = gr.Interface(
+    fn=analyze_messages,
+    inputs=[
+        gr.Textbox(lines=10, placeholder="Enter message here..."),
+        gr.CheckboxGroup(label="Do any of these apply to your situation?", choices=[
+            "They've threatened harm", "They isolate me", "I’ve changed my behavior out of fear",
+            "They monitor/follow me", "I feel unsafe when alone with them"
+        ])
+    ],
+    outputs=[gr.Textbox(label="Analysis Result")],
+    title="Abuse Pattern Detector"
 )
 
+if __name__ == "__main__":
+    iface.launch()