Niansuh committed on
Commit
4bfb2fb
·
verified ·
1 Parent(s): 711a467

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +34 -71
api/utils.py CHANGED
@@ -1,5 +1,3 @@
1
- # utils.py
2
-
3
  from datetime import datetime
4
  import json
5
  import uuid
@@ -12,18 +10,23 @@ import httpx
12
  from fastapi import HTTPException
13
  from api.config import (
14
  MODEL_MAPPING,
 
 
15
  BASE_URL,
16
  AGENT_MODE,
17
  TRENDING_AGENT_MODE,
 
 
18
  )
19
  from api.models import ChatRequest
20
  from api.logger import setup_logger
21
 
22
  logger = setup_logger(__name__)
23
 
24
- # Helper function to create a UUID-based chat ID
25
- def generate_chat_id() -> str:
26
- return str(uuid.uuid4())
 
27
 
28
  # Helper function to create chat completion data
29
  def create_chat_completion_data(
@@ -44,15 +47,11 @@ def create_chat_completion_data(
44
  "usage": None,
45
  }
46
 
47
- # Function to convert message to dictionary format, ensuring base64 data
48
- def message_to_dict(message):
49
- if isinstance(message.content, str):
50
- content = message.content
51
- elif isinstance(message.content, list) and len(message.content) > 0:
52
- content = message.content[0].get("text", "")
53
- else:
54
- content = ""
55
-
56
  if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
57
  # Ensure base64 images are always included for all models
58
  return {
@@ -67,18 +66,21 @@ def message_to_dict(message):
67
  return {"role": message.role, "content": content}
68
 
69
  # Function to strip model prefix from content if present
70
- def strip_model_prefix(content: str) -> str:
71
- """Function retained but no model_prefix is used."""
72
- # Since model_prefix is removed, this function can simply return content
 
 
73
  return content
74
 
75
  # Function to get the correct referer URL for logging
76
  def get_referer_url(chat_id: str, model: str) -> str:
77
  """Generate the referer URL based on specific models listed in MODEL_REFERERS."""
78
- # MODEL_REFERERS has been removed; referer_url defaults to BASE_URL
 
79
  return BASE_URL
80
 
81
- # Process streaming response without model prefixes and referers
82
  async def process_streaming_response(request: ChatRequest):
83
  chat_id = generate_chat_id()
84
  referer_url = get_referer_url(chat_id, request.model)
@@ -86,22 +88,9 @@ async def process_streaming_response(request: ChatRequest):
86
 
87
  agent_mode = AGENT_MODE.get(request.model, {})
88
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
89
-
90
- headers_api_chat = {
91
- 'Accept': '*/*',
92
- 'Accept-Language': 'en-US,en;q=0.9',
93
- 'Cache-Control': 'no-cache',
94
- 'Origin': BASE_URL,
95
- 'Pragma': 'no-cache',
96
- 'Priority': 'u=1, i',
97
- 'User-Agent': (
98
- 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
99
- 'AppleWebKit/537.36 (KHTML, like Gecko) '
100
- 'Chrome/130.0.0.0 Safari/537.36'
101
- ),
102
- 'Content-Type': 'application/json',
103
- 'Referer': referer_url, # Retain referer if necessary
104
- }
105
 
106
  if request.model == 'o1-preview':
107
  delay_seconds = random.randint(1, 60)
@@ -119,7 +108,7 @@ async def process_streaming_response(request: ChatRequest):
119
  "isChromeExt": False,
120
  "isMicMode": False,
121
  "maxTokens": request.max_tokens,
122
- "messages": [message_to_dict(msg) for msg in request.messages],
123
  "mobileClient": False,
124
  "playgroundTemperature": request.temperature,
125
  "playgroundTopP": request.top_p,
@@ -148,7 +137,7 @@ async def process_streaming_response(request: ChatRequest):
148
  content = line
149
  if content.startswith("$@$v=undefined-rv1$@$"):
150
  content = content[21:]
151
- cleaned_content = strip_model_prefix(content)
152
  yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
153
 
154
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
@@ -160,7 +149,7 @@ async def process_streaming_response(request: ChatRequest):
160
  logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
161
  raise HTTPException(status_code=500, detail=str(e))
162
 
163
- # Process non-streaming response without model prefixes and referers
164
  async def process_non_streaming_response(request: ChatRequest):
165
  chat_id = generate_chat_id()
166
  referer_url = get_referer_url(chat_id, request.model)
@@ -168,35 +157,10 @@ async def process_non_streaming_response(request: ChatRequest):
168
 
169
  agent_mode = AGENT_MODE.get(request.model, {})
170
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
171
-
172
- headers_api_chat = {
173
- 'Accept': '*/*',
174
- 'Accept-Language': 'en-US,en;q=0.9',
175
- 'Cache-Control': 'no-cache',
176
- 'Origin': BASE_URL,
177
- 'Pragma': 'no-cache',
178
- 'Priority': 'u=1, i',
179
- 'User-Agent': (
180
- 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
181
- 'AppleWebKit/537.36 (KHTML, like Gecko) '
182
- 'Chrome/130.0.0.0 Safari/537.36'
183
- ),
184
- 'Content-Type': 'application/json',
185
- 'Referer': referer_url, # Retain referer if necessary
186
- }
187
- headers_chat = {
188
- 'Accept': '*/*',
189
- 'Accept-Language': 'en-US,en;q=0.9',
190
- 'Cache-Control': 'no-cache',
191
- 'Content-Type': 'text/plain;charset=UTF-8',
192
- 'Origin': BASE_URL,
193
- 'Pragma': 'no-cache',
194
- 'Priority': 'u=1, i',
195
- 'Referer': referer_url,
196
- 'Next-Action': str(uuid.uuid4()),
197
- 'Next-Router-State-Tree': json.dumps([""]),
198
- 'Next-URL': '/',
199
- }
200
 
201
  if request.model == 'o1-preview':
202
  delay_seconds = random.randint(20, 60)
@@ -214,7 +178,7 @@ async def process_non_streaming_response(request: ChatRequest):
214
  "isChromeExt": False,
215
  "isMicMode": False,
216
  "maxTokens": request.max_tokens,
217
- "messages": [message_to_dict(msg) for msg in request.messages],
218
  "mobileClient": False,
219
  "playgroundTemperature": request.temperature,
220
  "playgroundTopP": request.top_p,
@@ -242,11 +206,10 @@ async def process_non_streaming_response(request: ChatRequest):
242
  except httpx.RequestError as e:
243
  logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
244
  raise HTTPException(status_code=500, detail=str(e))
245
-
246
  if full_response.startswith("$@$v=undefined-rv1$@$"):
247
  full_response = full_response[21:]
248
 
249
- cleaned_full_response = strip_model_prefix(full_response)
250
 
251
  return {
252
  "id": f"chatcmpl-{uuid.uuid4()}",
@@ -261,4 +224,4 @@ async def process_non_streaming_response(request: ChatRequest):
261
  }
262
  ],
263
  "usage": None,
264
- }
 
 
 
1
  from datetime import datetime
2
  import json
3
  import uuid
 
10
  from fastapi import HTTPException
11
  from api.config import (
12
  MODEL_MAPPING,
13
+ get_headers_api_chat,
14
+ get_headers_chat,
15
  BASE_URL,
16
  AGENT_MODE,
17
  TRENDING_AGENT_MODE,
18
+ MODEL_PREFIXES,
19
+ MODEL_REFERERS
20
  )
21
  from api.models import ChatRequest
22
  from api.logger import setup_logger
23
 
24
  logger = setup_logger(__name__)
25
 
26
# Helper function to create a random alphanumeric chat ID
def generate_chat_id(length: int = 7) -> str:
    """Return a random chat ID made of ASCII letters and digits.

    Args:
        length: Number of characters in the generated ID (default 7).

    Returns:
        A random alphanumeric string of the requested length.
    """
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choices(alphabet, k=length))
30
 
31
  # Helper function to create chat completion data
32
  def create_chat_completion_data(
 
47
  "usage": None,
48
  }
49
 
50
+ # Function to convert message to dictionary format, ensuring base64 data and optional model prefix
51
+ def message_to_dict(message, model_prefix: Optional[str] = None):
52
+ content = message.content if isinstance(message.content, str) else message.content[0]["text"]
53
+ if model_prefix:
54
+ content = f"{model_prefix} {content}"
 
 
 
 
55
  if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
56
  # Ensure base64 images are always included for all models
57
  return {
 
66
  return {"role": message.role, "content": content}
67
 
68
# Function to strip model prefix from content if present
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Remove the model prefix from the response content if present.

    Args:
        content: Raw response text, possibly starting with the model prefix.
        model_prefix: Prefix to strip; a falsy value disables stripping.

    Returns:
        The content with the leading prefix (and surrounding whitespace)
        removed, or the content unchanged if no prefix matches.
    """
    if model_prefix and content.startswith(model_prefix):
        # Lazy %-style args: the message is only formatted when DEBUG is enabled.
        logger.debug("Stripping prefix '%s' from content.", model_prefix)
        return content[len(model_prefix):].strip()
    return content
75
 
76
# Function to get the correct referer URL for logging
def get_referer_url(chat_id: str, model: str) -> str:
    """Generate the referer URL based on specific models listed in MODEL_REFERERS."""
    has_model_referer = model in MODEL_REFERERS
    return f"{BASE_URL}/chat/{chat_id}?model={model}" if has_model_referer else BASE_URL
82
 
83
+ # Process streaming response with headers from config.py
84
  async def process_streaming_response(request: ChatRequest):
85
  chat_id = generate_chat_id()
86
  referer_url = get_referer_url(chat_id, request.model)
 
88
 
89
  agent_mode = AGENT_MODE.get(request.model, {})
90
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
91
+ model_prefix = MODEL_PREFIXES.get(request.model, "")
92
+
93
+ headers_api_chat = get_headers_api_chat(referer_url)
 
 
 
 
 
 
 
 
 
 
 
 
 
94
 
95
  if request.model == 'o1-preview':
96
  delay_seconds = random.randint(1, 60)
 
108
  "isChromeExt": False,
109
  "isMicMode": False,
110
  "maxTokens": request.max_tokens,
111
+ "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
112
  "mobileClient": False,
113
  "playgroundTemperature": request.temperature,
114
  "playgroundTopP": request.top_p,
 
137
  content = line
138
  if content.startswith("$@$v=undefined-rv1$@$"):
139
  content = content[21:]
140
+ cleaned_content = strip_model_prefix(content, model_prefix)
141
  yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
142
 
143
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
 
149
  logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
150
  raise HTTPException(status_code=500, detail=str(e))
151
 
152
+ # Process non-streaming response with headers from config.py
153
  async def process_non_streaming_response(request: ChatRequest):
154
  chat_id = generate_chat_id()
155
  referer_url = get_referer_url(chat_id, request.model)
 
157
 
158
  agent_mode = AGENT_MODE.get(request.model, {})
159
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
160
+ model_prefix = MODEL_PREFIXES.get(request.model, "")
161
+
162
+ headers_api_chat = get_headers_api_chat(referer_url)
163
+ headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
164
 
165
  if request.model == 'o1-preview':
166
  delay_seconds = random.randint(20, 60)
 
178
  "isChromeExt": False,
179
  "isMicMode": False,
180
  "maxTokens": request.max_tokens,
181
+ "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
182
  "mobileClient": False,
183
  "playgroundTemperature": request.temperature,
184
  "playgroundTopP": request.top_p,
 
206
  except httpx.RequestError as e:
207
  logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
208
  raise HTTPException(status_code=500, detail=str(e))
 
209
  if full_response.startswith("$@$v=undefined-rv1$@$"):
210
  full_response = full_response[21:]
211
 
212
+ cleaned_full_response = strip_model_prefix(full_response, model_prefix)
213
 
214
  return {
215
  "id": f"chatcmpl-{uuid.uuid4()}",
 
224
  }
225
  ],
226
  "usage": None,
227
+ }