SamanthaStorm committed
Commit ff45556 · verified · 1 Parent(s): af48cbf

Update app.py

Files changed (1)
  1. app.py +10 -5
app.py CHANGED
@@ -102,7 +102,9 @@ def analyze_messages(input_text, risk_flags):
     input_text = input_text.strip()
     if not input_text:
         return "Please enter a message for analysis."
-    normalized_text = input_text.strip().lower()
+
+    # Normalize the text (example: lower case)
+    normalized_text = input_text.strip().lower()
 
     motif_flags, matched_phrases = detect_motifs(input_text)
     risk_flags = list(set(risk_flags + motif_flags)) if risk_flags else motif_flags
@@ -123,10 +125,11 @@ def analyze_messages(input_text, risk_flags):
     pattern_labels_used = list(set(threshold_labels + phrase_labels))
 
     contextual_flags = risk_flags if risk_flags else []
-    if len(contextual_flags) >= 2:
-        danger_flag_count += 1
+    # Note: If there are two or more contextual flags, you might wish to adjust a danger counter
+    # danger_flag_count += 1  <-- Ensure that danger_flag_count is defined before incrementing.
     abuse_level = calculate_abuse_level(scores, adjusted_thresholds, motif_hits=[label for label, _ in matched_phrases])
     abuse_description = interpret_abuse_level(abuse_level)
+
     # Escalate risk if user checks a critical context box
     if contextual_flags and abuse_level < 15:
         abuse_level = 15  # bump to at least Mild Concern
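The commented-out increment above assumes a `danger_flag_count` counter defined earlier in `analyze_messages`; that part of the file is outside this diff. Below is a minimal, self-contained sketch of the pattern the comment describes; the helper name `count_danger_flags` and the example flag labels are illustrative and not taken from app.py:

# Sketch only: define the counter before incrementing it, as the comment in the hunk advises.
def count_danger_flags(contextual_flags):
    """Return 1 extra danger flag when two or more context boxes are checked, else 0."""
    danger_flag_count = 0  # initialized before any increment
    if len(contextual_flags) >= 2:
        danger_flag_count += 1
    return danger_flag_count

# Example with two hypothetical checked boxes:
print(count_danger_flags(["threats", "isolation"]))  # -> 1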
 
@@ -198,6 +201,7 @@ def analyze_messages(input_text, risk_flags):
 
     return result
 
+# Updated Interface: Added flagging functionality to allow users to flag mispredictions.
 iface = gr.Interface(
     fn=analyze_messages,
     inputs=[
@@ -209,8 +213,9 @@ iface = gr.Interface(
     ],
     outputs=[gr.Textbox(label="Analysis Result")],
     title="Abuse Pattern Detector",
-    live=True
+    live=True,
+    allow_flagging="manual"  # This enables the manual flagging button for user feedback.
 )
 
 if __name__ == "__main__":
-    iface.queue().launch()
+    iface.queue().launch()
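For reference, allow_flagging="manual" is the Gradio 3.x-style option that adds a Flag button beneath the outputs; flagged inputs and outputs are then written as CSV rows under the interface's flagging directory (by default a local flagged/ folder). A minimal, self-contained sketch of that setup, using a placeholder function instead of analyze_messages since the full app is not shown in this commit:

import gradio as gr

def echo(message):
    # Placeholder for analyze_messages; the real analysis logic lives in app.py.
    return f"Received: {message}"

iface = gr.Interface(
    fn=echo,
    inputs=[gr.Textbox(label="Message")],
    outputs=[gr.Textbox(label="Analysis Result")],
    title="Abuse Pattern Detector",
    live=True,
    allow_flagging="manual",  # shows the manual Flag button
)

if __name__ == "__main__":
    iface.queue().launch()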