SamanthaStorm committed
Commit b0731a6 · verified · 1 Parent(s): 63a3e67

Update app.py

Files changed (1):
  app.py +5 -2
app.py CHANGED
@@ -88,11 +88,14 @@ def analyze_messages(input_text):
     # Prepare the result summary and detailed scores
     result = (
         f"Abuse Patterns Detected: {pattern_count} out of {len(PATTERN_LABELS)}\n"
+
         f"Abuse Level: {abuse_level}% - {abuse_description}\n"
-        f"Resources: {resources}"
+
         "The Danger Assessment is a validated tool used to help identify the level of risk for serious injury or homicide in cases of intimate partner violence.. "
         "It looks for specific abusive patterns that increase the likelihood of severe harm. "
-        f"The messages just analyzed contain at least {DANGER_LABELS} critical flags."
+        f"The messages just analyzed contain at least {danger_flag_count} critical flags."
+
+        f"Resources: {resources}"
     )
 
     # Return both a text summary and a JSON-like dict of scores per label
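For context: the commit swaps the raw {DANGER_LABELS} interpolation, which would have printed the whole label list, for the numeric danger_flag_count, and moves the Resources line to the end of the summary. Below is a minimal sketch of the post-commit assembly; every value is a made-up stand-in, since app.py defines the real PATTERN_LABELS, DANGER_LABELS, pattern_count, danger_flag_count, abuse_level, abuse_description, and resources elsewhere.

    # Minimal sketch of the post-commit result assembly.
    # All values below are illustrative stand-ins, not app.py's real definitions.
    PATTERN_LABELS = ["gaslighting", "blame shifting", "control"]  # assumed labels
    DANGER_LABELS = ["threats", "strangulation", "weapons"]        # assumed labels

    pattern_count = 2          # assumed: patterns scored above threshold
    danger_flag_count = 1      # the commit interpolates this count, not DANGER_LABELS
    abuse_level = 67           # assumed percentage
    abuse_description = "Moderate Risk"        # assumed
    resources = "<resource links here>"        # assumed placeholder

    result = (
        f"Abuse Patterns Detected: {pattern_count} out of {len(PATTERN_LABELS)}\n"

        f"Abuse Level: {abuse_level}% - {abuse_description}\n"

        "The Danger Assessment is a validated tool used to help identify the level "
        "of risk for serious injury or homicide in cases of intimate partner violence. "
        "It looks for specific abusive patterns that increase the likelihood of severe harm. "
        f"The messages just analyzed contain at least {danger_flag_count} critical flags."

        f"Resources: {resources}"
    )
    print(result)

One caveat worth noting: the blank lines the commit adds inside the parentheses only space out the source. Python's implicit string concatenation joins adjacent literals with no separator, so "critical flags." still runs directly into "Resources:" unless a "\n" is appended to one of the fragments.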