bibibi12345 committed
Commit 3195985 · verified · 1 parent: b4e89db

Update app/main.py

Files changed (1)
  app/main.py +7 -7
app/main.py CHANGED
@@ -985,8 +985,8 @@ def convert_to_openai_format(gemini_response, model: str) -> Dict[str, Any]:
     # Handle case where response might just have text directly (less common now)
     elif hasattr(gemini_response, 'text'):
         content = gemini_response.text
-        if is_encrypt_full:
-            content = deobfuscate_text(content)
+        # if is_encrypt_full:
+        #     content = deobfuscate_text(content)
         choices.append({
             "index": 0,
             "message": {
@@ -1044,8 +1044,8 @@ def convert_chunk_to_openai(chunk, model: str, response_id: str, candidate_index
         chunk_content = chunk.text

         # Apply deobfuscation if it was an encrypt-full model
-        if is_encrypt_full:
-            chunk_content = deobfuscate_text(chunk_content)
+        # if is_encrypt_full:
+        #     chunk_content = deobfuscate_text(chunk_content)

         # Determine finish reason (simplified)
         finish_reason = None
@@ -1754,9 +1754,9 @@ async def fake_stream_generator(client_instance, model_name, prompt, current_gen
         # raise ValueError("No text content found in response") # Option to raise error

         # --- Apply Deobfuscation if needed ---
-        if request.model.endswith("-encrypt-full"):
-            print(f"FAKE STREAMING: Deobfuscating full text for {request.model}")
-            full_text = deobfuscate_text(full_text)
+        # if request.model.endswith("-encrypt-full"):
+        #     print(f"FAKE STREAMING: Deobfuscating full text for {request.model}")
+        #     full_text = deobfuscate_text(full_text)
         # --- End Deobfuscation ---

         print(f"FAKE STREAMING: Received full response ({len(full_text)} chars), chunking into smaller pieces")