bibibi12345 committed
Commit a84d7ad · 1 Parent(s): b981c80

added styling and tags

Files changed (1)
  1. app/message_processing.py +79 -20
app/message_processing.py CHANGED
@@ -4,11 +4,36 @@ import json
 import time
 import urllib.parse
 from typing import List, Dict, Any, Union, Literal, Tuple # Added Tuple
+from enum import Enum
 
 from google.genai import types
 from models import OpenAIMessage, ContentPartText, ContentPartImage
 
 SUPPORTED_ROLES = ["user", "model"]
+
+class HarmProbability(Enum):
+    UNKNOWN = 0
+    NEGLIGIBLE = 1
+    LOW = 2
+    MEDIUM = 3
+    HIGH = 4
+
+def _get_highest_harm_probability(safety_ratings: List[Any]) -> str:
+    """Finds the highest harm probability from a list of safety ratings."""
+    max_prob = HarmProbability.UNKNOWN
+    if not safety_ratings:
+        return max_prob.name
+
+    for rating in safety_ratings:
+        try:
+            prob_name = rating.probability.name
+            prob_enum = HarmProbability[prob_name]
+            if prob_enum.value > max_prob.value:
+                max_prob = prob_enum
+        except (KeyError, AttributeError):
+            # Ignore if the probability name is not in our enum or attribute is missing
+            continue
+    return max_prob.name
 # New function to extract reasoning based on specified tags
 # Removed duplicate import
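
A note on the new helper: _get_highest_harm_probability reduces a list of ratings to the name of the worst probability level by mapping each rating's probability name onto the ordered HarmProbability enum and keeping the maximum; names the enum does not know are skipped. A minimal sketch of the behavior, using hypothetical SimpleNamespace stand-ins in place of real google.genai rating objects (the helper only reads .probability.name):

    from types import SimpleNamespace

    # Hypothetical stand-ins for google.genai safety ratings.
    ratings = [
        SimpleNamespace(probability=SimpleNamespace(name="LOW")),
        SimpleNamespace(probability=SimpleNamespace(name="MEDIUM")),
        SimpleNamespace(probability=SimpleNamespace(name="SOMETHING_ELSE")),  # unknown name, skipped
    ]
    print(_get_highest_harm_probability(ratings))  # -> "MEDIUM"
    print(_get_highest_harm_probability([]))       # -> "UNKNOWN"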
 
@@ -211,6 +236,54 @@ def deobfuscate_text(text: str) -> str:
     text = text.replace("```", placeholder).replace("``", "").replace("♩", "").replace("`♡`", "").replace("♡", "").replace("` `", "").replace("`", "").replace(placeholder, "```")
     return text
 
+
+def _create_safety_ratings_html(safety_ratings: List[Any]) -> str:
+    """Generates a styled HTML block for safety ratings."""
+    if not safety_ratings:
+        return ""
+
+    # Find the rating with the highest probability score
+    highest_rating = max(safety_ratings, key=lambda r: r.probability_score)
+    highest_score = highest_rating.probability_score
+
+    # Determine color based on the highest score
+    if highest_score <= 0.33:
+        color = "#0f8" # green
+    elif highest_score <= 0.66:
+        color = "yellow"
+    else:
+        color = "red"
+
+    # Format the summary line for the highest score
+    summary_category = highest_rating.category.name.replace('HARM_CATEGORY_', '').replace('_', ' ').title()
+    summary_probability = highest_rating.probability.name
+    # Using .7f for score and .8f for severity as per example's precision
+    summary_line = f"{summary_category}: {summary_probability} (Score: {highest_rating.probability_score:.7f}, Severity: {highest_rating.severity_score:.8f})"
+
+    # Format the list of all ratings for the <pre> block
+    ratings_list = []
+    for rating in safety_ratings:
+        category = rating.category.name.replace('HARM_CATEGORY_', '').replace('_', ' ').title()
+        probability = rating.probability.name
+        ratings_list.append(
+            f"{category}: {probability} (Score: {rating.probability_score:.7f}, Severity: {rating.severity_score:.8f})"
+        )
+    all_ratings_str = '\n'.join(ratings_list)
+
+    # CSS Style as specified
+    css_style = "<style>.cb{border:1px solid #444;margin:10px;border-radius:4px;background:#111}.cb summary{padding:8px;cursor:pointer;background:#222}.cb pre{margin:0;padding:10px;border-top:1px solid #444;white-space:pre-wrap}</style>"
+
+    # Final HTML structure
+    html_output = (
+        f'{css_style}'
+        f'<details class="cb">'
+        f'<summary style="color:{color}">{summary_line} ▼</summary>'
+        f'<pre>\n--- Safety Ratings ---\n{all_ratings_str}\n</pre>'
+        f'</details>'
+    )
+
+    return html_output
+
 def parse_gemini_response_for_reasoning_and_content(gemini_response_candidate: Any) -> Tuple[str, str]:
     """
     Parses a Gemini response candidate's content parts to separate reasoning and actual content.
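
The "styling" in the commit message lives in _create_safety_ratings_html: all ratings are folded into one collapsible <details class="cb"> element, the <summary> line shows the rating with the highest probability_score, and its color steps from green (#0f8) through yellow to red at thresholds of 0.33 and 0.66. Emitting the <style> block inline keeps each snippet self-contained for chat frontends that render message HTML. A rough sketch of one call, again with a hypothetical stand-in rating:

    from types import SimpleNamespace

    # Hypothetical stand-in; real ratings come from google.genai response candidates.
    rating = SimpleNamespace(
        category=SimpleNamespace(name="HARM_CATEGORY_HARASSMENT"),
        probability=SimpleNamespace(name="NEGLIGIBLE"),
        probability_score=0.12,
        severity_score=0.05,
    )
    html = _create_safety_ratings_html([rating])
    # html == '<style>.cb{...}</style>'
    #         '<details class="cb">'
    #         '<summary style="color:#0f8">Harassment: NEGLIGIBLE (Score: 0.1200000, Severity: 0.05000000) ▼</summary>'
    #         '<pre>\n--- Safety Ratings ---\nHarassment: NEGLIGIBLE (Score: 0.1200000, Severity: 0.05000000)\n</pre>'
    #         '</details>'
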
@@ -268,18 +341,11 @@ def convert_to_openai_format(gemini_response: Any, model: str) -> Dict[str, Any]
     final_normal_content_str = deobfuscate_text(final_normal_content_str)
 
     if hasattr(candidate, 'safety_ratings') and candidate.safety_ratings:
-        ratings_list = []
-        for rating in candidate.safety_ratings:
-            category = rating.category.name.replace('HARM_CATEGORY_', '').replace('_', ' ').title()
-            probability = rating.probability.name
-            ratings_list.append(
-                f"{category}: {probability} (Score: {rating.probability_score}, Severity: {rating.severity_score})"
-            )
-        safety_ratings_str = "\n\n--- Safety Ratings ---\n" + "\n".join(ratings_list) + "\n"
+        safety_ratings_html = _create_safety_ratings_html(candidate.safety_ratings)
         if final_reasoning_content_str:
-            final_reasoning_content_str += safety_ratings_str
+            final_reasoning_content_str += safety_ratings_html
         else:
-            final_normal_content_str += safety_ratings_str
+            final_normal_content_str += safety_ratings_html
 
     message_payload = {"role": "assistant", "content": final_normal_content_str}
     if final_reasoning_content_str:
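
In the non-streaming converter the block now rides along inside the assistant message itself, appended to reasoning_content when reasoning is present and to content otherwise. A shape-only sketch of the resulting payload, with hypothetical text:

    # Shape-only sketch; the trailing HTML comes from _create_safety_ratings_html.
    message_payload = {
        "role": "assistant",
        "content": 'Hello!<style>.cb{...}</style><details class="cb">...</details>',
    }
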
@@ -323,18 +389,11 @@ def convert_chunk_to_openai(chunk: Any, model: str, response_id: str, candidate_
     normal_text = deobfuscate_text(normal_text)
 
     if hasattr(candidate, 'safety_ratings') and candidate.safety_ratings:
-        ratings_list = []
-        for rating in candidate.safety_ratings:
-            category = rating.category.name.replace('HARM_CATEGORY_', '').replace('_', ' ').title()
-            probability = rating.probability.name
-            ratings_list.append(
-                f"{category}: {probability} (Score: {rating.probability_score}, Severity: {rating.severity_score})"
-            )
-        safety_ratings_str = "\n\n--- Safety Ratings ---\n" + "\n".join(ratings_list) + "\n"
+        safety_ratings_html = _create_safety_ratings_html(candidate.safety_ratings)
         if reasoning_text:
-            reasoning_text += safety_ratings_str
+            reasoning_text += safety_ratings_html
         else:
-            normal_text += safety_ratings_str
+            normal_text += safety_ratings_html
 
     if reasoning_text: delta_payload['reasoning_content'] = reasoning_text
     if normal_text or (not reasoning_text and not delta_payload): # Ensure content key if nothing else
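
The streaming converter mirrors this: when a chunk's candidate carries safety_ratings, the full HTML block is appended to that chunk's delta text, so clients receive it as ordinary streamed content. A shape-only sketch of such a delta, with hypothetical text:

    # Shape-only sketch of the delta that carries the ratings block.
    delta_payload = {"content": 'final tokens<style>.cb{...}</style><details class="cb">...</details>'}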
 