bibibi12345 committed
Commit e710d8b · verified · 1 Parent(s): 3195985

Update app/main.py

Files changed (1):
  1. app/main.py +11 -9
app/main.py CHANGED
@@ -945,7 +945,9 @@ def deobfuscate_text(text: str) -> str:
     text = text.replace("♩", "")
     text = text.replace("`♡`", "") # Handle the backtick version too
     text = text.replace("♡", "")
+    text = text.replace("` `", "")
     text = text.replace("``", "")
+    text = text.replace("`", "")
 
     # Restore triple backticks
     text = text.replace(placeholder, "```")
@@ -971,8 +973,8 @@ def convert_to_openai_format(gemini_response, model: str) -> Dict[str, Any]:
             content += part.text
 
         # Apply deobfuscation if it was an encrypt-full model
-        # if is_encrypt_full:
-        #     content = deobfuscate_text(content)
+        if is_encrypt_full:
+            content = deobfuscate_text(content)
 
         choices.append({
             "index": i,
@@ -985,8 +987,8 @@ def convert_to_openai_format(gemini_response, model: str) -> Dict[str, Any]:
     # Handle case where response might just have text directly (less common now)
     elif hasattr(gemini_response, 'text'):
         content = gemini_response.text
-        # if is_encrypt_full:
-        #     content = deobfuscate_text(content)
+        if is_encrypt_full:
+            content = deobfuscate_text(content)
         choices.append({
             "index": 0,
             "message": {
@@ -1044,8 +1046,8 @@ def convert_chunk_to_openai(chunk, model: str, response_id: str, candidate_index
     chunk_content = chunk.text
 
     # Apply deobfuscation if it was an encrypt-full model
-    # if is_encrypt_full:
-    #     chunk_content = deobfuscate_text(chunk_content)
+    if is_encrypt_full:
+        chunk_content = deobfuscate_text(chunk_content)
 
     # Determine finish reason (simplified)
     finish_reason = None
@@ -1754,9 +1756,9 @@ async def fake_stream_generator(client_instance, model_name, prompt, current_gen
         # raise ValueError("No text content found in response") # Option to raise error
 
     # --- Apply Deobfuscation if needed ---
-    # if request.model.endswith("-encrypt-full"):
-    #     print(f"FAKE STREAMING: Deobfuscating full text for {request.model}")
-    #     full_text = deobfuscate_text(full_text)
+    if request.model.endswith("-encrypt-full"):
+        print(f"FAKE STREAMING: Deobfuscating full text for {request.model}")
+        full_text = deobfuscate_text(full_text)
     # --- End Deobfuscation ---
 
     print(f"FAKE STREAMING: Received full response ({len(full_text)} chars), chunking into smaller pieces")
 