bibibi12345 committed
Commit dd504cd · 1 Parent(s): 2a81a94

added more error handling

Files changed (2)
  1. app/api_helpers.py +54 -20
  2. app/message_processing.py +29 -13
app/api_helpers.py CHANGED
@@ -45,21 +45,39 @@ def create_generation_config(request: OpenAIRequest) -> Dict[str, Any]:
     return config
 
 def is_response_valid(response):
-    if response is None: return False
-    if hasattr(response, 'text') and response.text: return True
+    if response is None:
+        print("DEBUG: Response is None, therefore invalid.")
+        return False
+
+    # Check for direct text attribute
+    if hasattr(response, 'text') and isinstance(response.text, str) and response.text.strip():
+        # print("DEBUG: Response valid due to response.text")
+        return True
+
+    # Check candidates for text content
     if hasattr(response, 'candidates') and response.candidates:
-        candidate = response.candidates[0]
-        if hasattr(candidate, 'text') and candidate.text: return True
-        if hasattr(candidate, 'content') and hasattr(candidate.content, 'parts'):
-            for part in candidate.content.parts:
-                if hasattr(part, 'text') and part.text: return True
-    if hasattr(response, 'candidates') and response.candidates: return True # For fake streaming
-    for attr in dir(response):
-        if attr.startswith('_'): continue
-        try:
-            if isinstance(getattr(response, attr), str) and getattr(response, attr): return True
-        except: pass
-    print("DEBUG: Response is invalid, no usable content found")
+        for candidate in response.candidates: # Iterate through all candidates
+            if hasattr(candidate, 'text') and isinstance(candidate.text, str) and candidate.text.strip():
+                # print("DEBUG: Response valid due to candidate.text")
+                return True
+            if hasattr(candidate, 'content') and hasattr(candidate.content, 'parts') and candidate.content.parts:
+                for part in candidate.content.parts:
+                    if hasattr(part, 'text') and isinstance(part.text, str) and part.text.strip():
+                        # print("DEBUG: Response valid due to part.text in candidate's content part")
+                        return True
+
+    # Check for prompt_feedback, which indicates the API processed the request,
+    # even if the content is empty (e.g. due to safety filtering).
+    # The fake_stream_generator should still attempt to process this to convey safety messages if present.
+    if hasattr(response, 'prompt_feedback'):
+        # Check if there's any block reason, which might be interesting to log or handle
+        if hasattr(response.prompt_feedback, 'block_reason') and response.prompt_feedback.block_reason:
+            print(f"DEBUG: Response has prompt_feedback with block_reason: {response.prompt_feedback.block_reason}, considering it valid for processing.")
+        else:
+            print("DEBUG: Response has prompt_feedback (no block_reason), considering it valid for processing.")
+        return True
+
+    print("DEBUG: Response is invalid, no usable text content or prompt_feedback found.")
     return False
 
 async def fake_stream_generator(client_instance, model_name: str, prompt: Union[types.Content, List[types.Content]], current_gen_config: Dict[str, Any], request_obj: OpenAIRequest):
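The stricter checks are easiest to see on stand-in objects. A minimal sketch using SimpleNamespace in place of the real google-genai response types (the objects below are hypothetical; only their attribute names mirror the checks above):

from types import SimpleNamespace

# Whitespace-only text no longer counts as usable content.
empty = SimpleNamespace(text="   ", candidates=[])
assert not is_response_valid(empty)

# A safety-blocked response has prompt_feedback but no text; it is now
# accepted so the block reason can still be surfaced to the client.
blocked = SimpleNamespace(
    text=None,
    candidates=[],
    prompt_feedback=SimpleNamespace(block_reason="SAFETY"),
)
assert is_response_valid(blocked)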
@@ -83,12 +101,20 @@ async def fake_stream_generator(client_instance, model_name: str, prompt: Union[
         if not is_response_valid(response):
             raise ValueError(f"Invalid/empty response in fake stream: {str(response)[:200]}")
         full_text = ""
-        if hasattr(response, 'text'): full_text = response.text
+        if hasattr(response, 'text'):
+            full_text = response.text or "" # Coalesce None to empty string
         elif hasattr(response, 'candidates') and response.candidates:
+            # Typically, we focus on the first candidate for non-streaming synthesis
             candidate = response.candidates[0]
-            if hasattr(candidate, 'text'): full_text = candidate.text
-            elif hasattr(candidate.content, 'parts'):
-                full_text = "".join(part.text for part in candidate.content.parts if hasattr(part, 'text'))
+            if hasattr(candidate, 'text'):
+                full_text = candidate.text or "" # Coalesce None to empty string
+            elif hasattr(candidate, 'content') and hasattr(candidate.content, 'parts') and candidate.content.parts:
+                # Ensure parts are iterated and text is joined correctly even if some parts have no text or part.text is None
+                texts = []
+                for part in candidate.content.parts:
+                    if hasattr(part, 'text') and part.text is not None: # Check part.text exists and is not None
+                        texts.append(part.text)
+                full_text = "".join(texts)
         if request_obj.model.endswith("-encrypt-full"):
             full_text = deobfuscate_text(full_text)
 
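Two failure modes motivate the coalescing here; a short sketch with hypothetical part objects shows both (SimpleNamespace stands in for the SDK types):

from types import SimpleNamespace

parts = [SimpleNamespace(text="Hello, "), SimpleNamespace(text=None), SimpleNamespace(text="world")]

# The old generator expression filtered only on hasattr, so a None text
# reached "".join() and raised TypeError; filtering on `is not None` works.
full_text = "".join(p.text for p in parts if hasattr(p, 'text') and p.text is not None)
assert full_text == "Hello, world"

# And `x or ""` keeps full_text a string when the SDK returns text=None.
assert (None or "") == ""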
@@ -141,8 +167,16 @@ async def execute_gemini_call(
             yield "data: [DONE]\n\n"
         except Exception as e_stream_call:
             print(f"Streaming Error in _execute_gemini_call: {e_stream_call}")
-            err_resp_content_call = create_openai_error_response(500, str(e_stream_call), "server_error")
-            yield f"data: {json.dumps(err_resp_content_call)}\n\n"
+
+            error_message_str = str(e_stream_call)
+            # Truncate very long error messages to prevent excessively large JSON payloads.
+            if len(error_message_str) > 1024: # Max length for the error string
+                error_message_str = error_message_str[:1024] + "..."
+
+            err_resp_content_call = create_openai_error_response(500, error_message_str, "server_error")
+            json_payload_for_error = json.dumps(err_resp_content_call)
+            print(f"DEBUG: Yielding error JSON payload during true streaming: {json_payload_for_error}")
+            yield f"data: {json_payload_for_error}\n\n"
             yield "data: [DONE]\n\n"
             raise # Re-raise to be caught by retry logic if any
     return StreamingResponse(_stream_generator_inner_for_execute(), media_type="text/event-stream")
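For reference, a self-contained sketch of the frames a client now receives when a streaming call fails. sse_error_frames is a hypothetical helper written for illustration; the exact payload shape produced by create_openai_error_response is assumed, not confirmed here:

import json

def sse_error_frames(exc: Exception, max_len: int = 1024):
    """Mirror of the truncate-then-emit logic above (illustrative only)."""
    msg = str(exc)
    if len(msg) > max_len:
        msg = msg[:max_len] + "..."  # keep the JSON payload bounded
    payload = {"error": {"message": msg, "type": "server_error", "code": 500}}
    yield f"data: {json.dumps(payload)}\n\n"
    yield "data: [DONE]\n\n"  # always terminate the SSE stream for the client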
 
app/message_processing.py CHANGED
@@ -344,11 +344,14 @@ def convert_to_openai_format(gemini_response, model: str) -> Dict[str, Any]:
         for i, candidate in enumerate(gemini_response.candidates):
             content = ""
             if hasattr(candidate, 'text'):
-                content = candidate.text
+                content = candidate.text or "" # Coalesce None to empty string
             elif hasattr(candidate, 'content') and hasattr(candidate.content, 'parts'):
+                # Ensure content remains a string even if parts have None text
+                parts_texts = []
                 for part_item in candidate.content.parts:
-                    if hasattr(part_item, 'text'):
-                        content += part_item.text
+                    if hasattr(part_item, 'text') and part_item.text is not None:
+                        parts_texts.append(part_item.text)
+                content = "".join(parts_texts)
 
             if is_encrypt_full:
                 content = deobfuscate_text(content)
@@ -359,9 +362,9 @@ def convert_to_openai_format(gemini_response, model: str) -> Dict[str, Any]:
                 "finish_reason": "stop"
             })
     elif hasattr(gemini_response, 'text'):
-        content = gemini_response.text
+        content = gemini_response.text or "" # Coalesce None to empty string
         if is_encrypt_full:
-            content = deobfuscate_text(content)
+            content = deobfuscate_text(content) # deobfuscate_text should also be robust to empty string
         choices.append({
             "index": 0,
             "message": {"role": "assistant", "content": content},
@@ -392,14 +395,27 @@ def convert_to_openai_format(gemini_response, model: str) -> Dict[str, Any]:
 def convert_chunk_to_openai(chunk, model: str, response_id: str, candidate_index: int = 0) -> str:
     """Converts Gemini stream chunk to OpenAI format, applying deobfuscation if needed."""
     is_encrypt_full = model.endswith("-encrypt-full")
-    chunk_content = ""
+    chunk_content_str = "" # Renamed for clarity and to ensure it's always a string
 
-    if hasattr(chunk, 'parts') and chunk.parts:
-        for part_item in chunk.parts:
-            if hasattr(part_item, 'text'):
-                chunk_content += part_item.text
-    elif hasattr(chunk, 'text'):
-        chunk_content = chunk.text
+    try:
+        if hasattr(chunk, 'parts') and chunk.parts:
+            current_parts_texts = []
+            for part_item in chunk.parts:
+                # Ensure part_item.text exists, is not None, and convert to string
+                if hasattr(part_item, 'text') and part_item.text is not None:
+                    current_parts_texts.append(str(part_item.text))
+            chunk_content_str = "".join(current_parts_texts)
+        elif hasattr(chunk, 'text') and chunk.text is not None:
+            # Ensure chunk.text is converted to string if it's not None
+            chunk_content_str = str(chunk.text)
+        # If chunk has neither .parts nor .text, or if .text is None, chunk_content_str remains ""
+    except Exception as e_chunk_extract:
+        # Log the error and the problematic chunk structure
+        print(f"WARNING: Error extracting content from chunk in convert_chunk_to_openai: {e_chunk_extract}. Chunk type: {type(chunk)}. Chunk data: {str(chunk)[:200]}")
+        chunk_content_str = "" # Default to empty string in case of any error
+
+    if is_encrypt_full:
+        chunk_content_str = deobfuscate_text(chunk_content_str) # deobfuscate_text should handle empty string
 
     if is_encrypt_full:
         chunk_content = deobfuscate_text(chunk_content)
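The try/except matters because hasattr() only suppresses AttributeError: if a chunk property raises anything else while being probed, the exception propagates. A hypothetical illustration:

class BrokenChunk:
    """Hypothetical chunk whose .parts property raises on access."""
    @property
    def parts(self):
        raise RuntimeError("proto decode failure")

# hasattr(BrokenChunk(), 'parts') raises RuntimeError rather than returning
# False; the new try/except turns that into a WARNING log and an empty
# chunk_content_str instead of a dead stream.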
@@ -415,7 +431,7 @@ def convert_chunk_to_openai(chunk, model: str, response_id: str, candidate_index
         "choices": [
             {
                 "index": candidate_index,
-                "delta": {**({"content": chunk_content} if chunk_content else {})},
+                "delta": {**({"content": chunk_content_str} if chunk_content_str else {})},
                 "finish_reason": finish_reason
             }
         ]
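The dict spread keeps empty chunks valid. The two shapes a client can now see, with illustrative placeholder values:

# Non-empty chunk: the delta carries a content key.
nonempty_choice = {"index": 0, "delta": {"content": "Hello"}, "finish_reason": None}

# Empty chunk (e.g. after a failed extraction): delta collapses to {},
# matching how OpenAI streams signal a contentless chunk.
empty_choice = {"index": 0, "delta": {}, "finish_reason": "stop"}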