bibibi12345 committed · verified
Commit ef5e32f · Parent: 8dd7c3c

Update app/main.py

Files changed (1)
  1. app/main.py +4 -28
app/main.py CHANGED
@@ -314,31 +314,9 @@ def create_gemini_prompt_old(messages: List[OpenAIMessage]) -> Union[str, List[A
     # If no images, use the text-only format
     if not has_images:
         prompt = ""
-
-        # Extract system message if present
-        system_message = None
-        # Process all messages in their original order
-        for message in messages:
-            if message.role == "system":
-                # Handle both string and list[dict] content types
-                if isinstance(message.content, str):
-                    system_message = message.content
-                elif isinstance(message.content, list) and message.content and isinstance(message.content[0], dict) and 'text' in message.content[0]:
-                    system_message = message.content[0]['text']
-                else:
-                    # Handle unexpected format or raise error? For now, assume it's usable or skip.
-                    system_message = str(message.content)  # Fallback, might need refinement
-                break
-
-        # If system message exists, prepend it
-        if system_message:
-            prompt += f"System: {system_message}\n\n"
 
         # Add other messages
         for message in messages:
-            if message.role == "system":
-                continue  # Already handled
-
             # Handle both string and list[dict] content types
             content_text = ""
             if isinstance(message.content, str):
@@ -385,12 +363,10 @@ def create_gemini_prompt_old(messages: List[OpenAIMessage]) -> Union[str, List[A
     # Process user and assistant messages
     # Process all messages in their original order
     for message in messages:
-        if message.role == "system":
-            continue  # Already handled
 
         # For string content, add as text
         if isinstance(message.content, str):
-            prefix = "Human: " if message.role == "user" else "AI: "
+            prefix = "Human: " if message.role == "user" or message.role == "system" else "AI: "
             gemini_contents.append(f"{prefix}{message.content}")
 
         # For list content, process each part
@@ -407,7 +383,7 @@ def create_gemini_prompt_old(messages: List[OpenAIMessage]) -> Union[str, List[A
 
             # Add the combined text content if any
             if text_content:
-                prefix = "Human: " if message.role == "user" else "AI: "
+                prefix = "Human: " if message.role == "user" or message.role == "system" else "AI: "
                 gemini_contents.append(f"{prefix}{text_content}")
 
             # Then process image parts
@@ -1167,8 +1143,8 @@ async def chat_completions(request: OpenAIRequest, api_key: str = Depends(get_ap
 
     attempts = [
         {"name": "base", "model": base_model_name, "prompt_func": create_gemini_prompt, "config_modifier": lambda c: c},
-        {"name": "old_format", "model": base_model_name, "prompt_func": create_gemini_prompt_old, "config_modifier": lambda c: c},
-        {"name": "encrypt", "model": base_model_name, "prompt_func": create_encrypted_gemini_prompt, "config_modifier": lambda c: {**c, "system_instruction": encryption_instructions}}
+        {"name": "encrypt", "model": base_model_name, "prompt_func": create_encrypted_gemini_prompt, "config_modifier": lambda c: {**c, "system_instruction": encryption_instructions}},
+        {"name": "old_format", "model": base_model_name, "prompt_func": create_gemini_prompt_old, "config_modifier": lambda c: c}
     ]
 
     for i, attempt in enumerate(attempts):
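Net effect of the first three hunks: create_gemini_prompt_old no longer extracts the system message and prepends it as "System: ..."; system turns stay in their original position and, per the changed prefix lines, share the "Human: " prefix with user turns. A minimal sketch of that rule, assuming string-only content and assuming the text-only branch applies the same prefix logic shown in hunks 2 and 3 (build_text_prompt and the OpenAIMessage stub are illustrative, not the app's actual code):

from typing import Any, List, Union

class OpenAIMessage:
    # Illustrative stand-in for the app's message model; only role and content matter here.
    def __init__(self, role: str, content: Union[str, List[Any]]):
        self.role = role
        self.content = content

def build_text_prompt(messages: List[OpenAIMessage]) -> str:
    # Render every message in its original order. After this commit, system
    # turns are no longer hoisted to the top as "System: ..."; they share the
    # "Human: " prefix used for user turns (hunks 2 and 3 of the diff).
    parts = []
    for message in messages:
        prefix = "Human: " if message.role in ("user", "system") else "AI: "
        if isinstance(message.content, str):
            parts.append(f"{prefix}{message.content}")
    return "\n".join(parts)

For example, a [system, user, assistant] conversation now renders as "Human: <system>", "Human: <user>", "AI: <assistant>" rather than a hoisted "System: ..." header followed by the remaining turns.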
 
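The last hunk reorders the fallback list in chat_completions so the encrypted prompt is tried before the legacy format: base, then encrypt, then old_format. A hedged sketch of how such an attempts list is typically consumed; the try/except loop, run_with_fallbacks, and call_gemini are assumptions here, only the ordering comes from the diff:

from typing import Any, Dict, List

async def call_gemini(model: str, prompt: Any, config: Dict[str, Any]) -> str:
    # Hypothetical stand-in for the real Gemini client call in app/main.py.
    raise NotImplementedError("replace with the real client call")

async def run_with_fallbacks(messages: List[Any], attempts: List[Dict[str, Any]],
                             base_config: Dict[str, Any]) -> str:
    # Try each strategy in order; return the first success, re-raise the last failure.
    last_error: Exception = RuntimeError("no attempts configured")
    for attempt in attempts:
        try:
            prompt = attempt["prompt_func"](messages)
            config = attempt["config_modifier"](dict(base_config))
            return await call_gemini(attempt["model"], prompt, config)
        except Exception as exc:
            last_error = exc  # remember the failure and fall through to the next attempt
    raise last_error

With this ordering, old_format (create_gemini_prompt_old) becomes the last resort instead of the first fallback.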