Niansuh commited on
Commit
beb8f59
·
verified ·
1 Parent(s): a6e9803

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +271 -227
api/utils.py CHANGED
@@ -1,227 +1,271 @@
1
- from datetime import datetime
2
- import json
3
- import uuid
4
- import asyncio
5
- import random
6
- import string
7
- from typing import Any, Dict, Optional
8
-
9
- import httpx
10
- from fastapi import HTTPException
11
- from api.config import (
12
- MODEL_MAPPING,
13
- get_headers_api_chat,
14
- get_headers_chat,
15
- BASE_URL,
16
- AGENT_MODE,
17
- TRENDING_AGENT_MODE,
18
- MODEL_PREFIXES,
19
- MODEL_REFERERS
20
- )
21
- from api.models import ChatRequest
22
- from api.logger import setup_logger
23
-
24
- logger = setup_logger(__name__)
25
-
26
- # Helper function to create a random alphanumeric chat ID
27
- def generate_chat_id(length: int = 7) -> str:
28
- characters = string.ascii_letters + string.digits
29
- return ''.join(random.choices(characters, k=length))
30
-
31
- # Helper function to create chat completion data
32
- def create_chat_completion_data(
33
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
34
- ) -> Dict[str, Any]:
35
- return {
36
- "id": f"chatcmpl-{uuid.uuid4()}",
37
- "object": "chat.completion.chunk",
38
- "created": timestamp,
39
- "model": model,
40
- "choices": [
41
- {
42
- "index": 0,
43
- "delta": {"content": content, "role": "assistant"},
44
- "finish_reason": finish_reason,
45
- }
46
- ],
47
- "usage": None,
48
- }
49
-
50
- # Function to convert message to dictionary format, ensuring base64 data and optional model prefix
51
- def message_to_dict(message, model_prefix: Optional[str] = None):
52
- content = message.content if isinstance(message.content, str) else message.content[0]["text"]
53
- if model_prefix:
54
- content = f"{model_prefix} {content}"
55
- if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
56
- # Ensure base64 images are always included for all models
57
- return {
58
- "role": message.role,
59
- "content": content,
60
- "data": {
61
- "imageBase64": message.content[1]["image_url"]["url"],
62
- "fileText": "",
63
- "title": "snapshot",
64
- },
65
- }
66
- return {"role": message.role, "content": content}
67
-
68
- # Function to strip model prefix from content if present
69
- def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
70
- """Remove the model prefix from the response content if present."""
71
- if model_prefix and content.startswith(model_prefix):
72
- logger.debug(f"Stripping prefix '{model_prefix}' from content.")
73
- return content[len(model_prefix):].strip()
74
- return content
75
-
76
- # Function to get the correct referer URL for logging
77
- def get_referer_url(chat_id: str, model: str) -> str:
78
- """Generate the referer URL based on specific models listed in MODEL_REFERERS."""
79
- if model in MODEL_REFERERS:
80
- return f"{BASE_URL}/chat/{chat_id}?model={model}"
81
- return BASE_URL
82
-
83
- # Process streaming response with headers from config.py
84
- async def process_streaming_response(request: ChatRequest):
85
- chat_id = generate_chat_id()
86
- referer_url = get_referer_url(chat_id, request.model)
87
- logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
88
-
89
- agent_mode = AGENT_MODE.get(request.model, {})
90
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
91
- model_prefix = MODEL_PREFIXES.get(request.model, "")
92
-
93
- headers_api_chat = get_headers_api_chat(referer_url)
94
-
95
- if request.model == 'o1-preview':
96
- delay_seconds = random.randint(1, 60)
97
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
98
- await asyncio.sleep(delay_seconds)
99
-
100
- json_data = {
101
- "agentMode": agent_mode,
102
- "clickedAnswer2": False,
103
- "clickedAnswer3": False,
104
- "clickedForceWebSearch": False,
105
- "codeModelMode": True,
106
- "githubToken": None,
107
- "id": chat_id,
108
- "isChromeExt": False,
109
- "isMicMode": False,
110
- "maxTokens": request.max_tokens,
111
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
112
- "mobileClient": False,
113
- "playgroundTemperature": request.temperature,
114
- "playgroundTopP": request.top_p,
115
- "previewToken": None,
116
- "trendingAgentMode": trending_agent_mode,
117
- "userId": None,
118
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
119
- "userSystemPrompt": None,
120
- "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
121
- "visitFromDelta": False,
122
- }
123
-
124
- async with httpx.AsyncClient() as client:
125
- try:
126
- async with client.stream(
127
- "POST",
128
- f"{BASE_URL}/api/chat",
129
- headers=headers_api_chat,
130
- json=json_data,
131
- timeout=100,
132
- ) as response:
133
- response.raise_for_status()
134
- async for line in response.aiter_lines():
135
- timestamp = int(datetime.now().timestamp())
136
- if line:
137
- content = line
138
- if content.startswith("$@$v=undefined-rv1$@$"):
139
- content = content[21:]
140
- cleaned_content = strip_model_prefix(content, model_prefix)
141
- yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
142
-
143
- yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
144
- yield "data: [DONE]\n\n"
145
- except httpx.HTTPStatusError as e:
146
- logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
147
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
148
- except httpx.RequestError as e:
149
- logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
150
- raise HTTPException(status_code=500, detail=str(e))
151
-
152
- # Process non-streaming response with headers from config.py
153
- async def process_non_streaming_response(request: ChatRequest):
154
- chat_id = generate_chat_id()
155
- referer_url = get_referer_url(chat_id, request.model)
156
- logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
157
-
158
- agent_mode = AGENT_MODE.get(request.model, {})
159
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
160
- model_prefix = MODEL_PREFIXES.get(request.model, "")
161
-
162
- headers_api_chat = get_headers_api_chat(referer_url)
163
- headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
164
-
165
- if request.model == 'o1-preview':
166
- delay_seconds = random.randint(20, 60)
167
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
168
- await asyncio.sleep(delay_seconds)
169
-
170
- json_data = {
171
- "agentMode": agent_mode,
172
- "clickedAnswer2": False,
173
- "clickedAnswer3": False,
174
- "clickedForceWebSearch": False,
175
- "codeModelMode": True,
176
- "githubToken": None,
177
- "id": chat_id,
178
- "isChromeExt": False,
179
- "isMicMode": False,
180
- "maxTokens": request.max_tokens,
181
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
182
- "mobileClient": False,
183
- "playgroundTemperature": request.temperature,
184
- "playgroundTopP": request.top_p,
185
- "previewToken": None,
186
- "trendingAgentMode": trending_agent_mode,
187
- "userId": None,
188
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
189
- "userSystemPrompt": None,
190
- "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
191
- "visitFromDelta": False,
192
- }
193
-
194
- full_response = ""
195
- async with httpx.AsyncClient() as client:
196
- try:
197
- async with client.stream(
198
- method="POST", url=f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data
199
- ) as response:
200
- response.raise_for_status()
201
- async for chunk in response.aiter_text():
202
- full_response += chunk
203
- except httpx.HTTPStatusError as e:
204
- logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
205
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
206
- except httpx.RequestError as e:
207
- logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
208
- raise HTTPException(status_code=500, detail=str(e))
209
- if full_response.startswith("$@$v=undefined-rv1$@$"):
210
- full_response = full_response[21:]
211
-
212
- cleaned_full_response = strip_model_prefix(full_response, model_prefix)
213
-
214
- return {
215
- "id": f"chatcmpl-{uuid.uuid4()}",
216
- "object": "chat.completion",
217
- "created": int(datetime.now().timestamp()),
218
- "model": request.model,
219
- "choices": [
220
- {
221
- "index": 0,
222
- "message": {"role": "assistant", "content": cleaned_full_response},
223
- "finish_reason": "stop",
224
- }
225
- ],
226
- "usage": None,
227
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+ import json
3
+ import uuid
4
+ import asyncio
5
+ import random
6
+ import string
7
+ from typing import Any, Dict, Optional
8
+
9
+ import httpx
10
+ from fastapi import HTTPException
11
+ from api.config import (
12
+ MODEL_MAPPING,
13
+ BASE_URL,
14
+ AGENT_MODE,
15
+ TRENDING_AGENT_MODE,
16
+ )
17
+ from api.models import ChatRequest
18
+ from api.logger import setup_logger
19
+
20
+ logger = setup_logger(__name__)
21
+
22
# Helper to build a random alphanumeric identifier for a chat session.
def generate_chat_id(length: int = 7) -> str:
    """Return a random string of ASCII letters and digits of the given length."""
    alphabet = string.ascii_letters + string.digits
    return "".join(random.choice(alphabet) for _ in range(length))
26
+
27
# Helper to build a single OpenAI-style streaming chunk payload.
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Build one ``chat.completion.chunk`` payload.

    Args:
        content: Delta text for this chunk (may be empty for the final chunk).
        model: Model name echoed back to the client.
        timestamp: Unix timestamp used as the ``created`` field.
        finish_reason: ``None`` for intermediate chunks, e.g. ``"stop"`` for
            the terminating chunk.

    Returns:
        A dict shaped like an OpenAI streaming chat-completion chunk.
    """
    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": None,
    }
45
+
46
# Convert an incoming chat message into the upstream API's dict format,
# forwarding any attached base64 image alongside the text.
def message_to_dict(message):
    """Serialize a message object to the upstream request shape.

    String content is passed through; list content uses the first element's
    ``"text"`` value (empty string if the list is empty). A two-element list
    whose second element carries ``"image_url"`` is emitted with an extra
    ``data`` payload holding the base64 image.
    """
    raw = message.content
    if isinstance(raw, str):
        text = raw
    elif isinstance(raw, list) and raw:
        text = raw[0].get("text", "")
    else:
        text = ""

    # Text + image pair: forward the image so all models receive it.
    if isinstance(raw, list) and len(raw) == 2 and "image_url" in raw[1]:
        return {
            "role": message.role,
            "content": text,
            "data": {
                "imageBase64": raw[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }

    return {"role": message.role, "content": text}
67
+
68
# Kept for call-site compatibility; model prefixes are no longer applied.
def strip_model_prefix(content: str) -> str:
    """Return *content* unchanged.

    Model prefixes were removed from this service, so there is nothing to
    strip; the function survives only so existing callers keep working.
    """
    return content
73
+
74
# Referer resolution — MODEL_REFERERS was removed, so every model maps to
# BASE_URL; the signature is preserved so existing call sites stay valid.
def get_referer_url(chat_id: str, model: str) -> str:
    """Return the referer URL for logging/headers (always ``BASE_URL`` now)."""
    return BASE_URL
79
+
80
# Process a streaming chat request and yield OpenAI-style SSE chunks.
async def process_streaming_response(request: ChatRequest):
    """Stream a chat completion from the upstream ``/api/chat`` endpoint.

    Yields ``data:`` SSE lines containing ``chat.completion.chunk`` payloads,
    followed by a final chunk with ``finish_reason='stop'`` and a
    ``data: [DONE]`` sentinel.

    Raises:
        HTTPException: with the upstream status code on HTTP errors, or 500
            on transport/request failures.
    """
    chat_id = generate_chat_id()
    referer_url = get_referer_url(chat_id, request.model)
    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})

    # Browser-like headers; the upstream rejects requests without them.
    headers_api_chat = {
        'accept': '*/*',
        'accept-language': 'en-US,en;q=0.9',
        'cache-control': 'no-cache',
        'origin': BASE_URL,
        'pragma': 'no-cache',
        'priority': 'u=1, i',
        'sec-ch-ua': '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/130.0.0.0 Safari/537.36',
        'Content-Type': 'application/json',
        'Referer': referer_url,  # Retain referer if necessary
    }

    if request.model == 'o1-preview':
        # Artificial delay for this model — presumably rate-limit evasion;
        # TODO confirm the intent with the service owner.
        delay_seconds = random.randint(1, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
        await asyncio.sleep(delay_seconds)

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        "id": chat_id,
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [message_to_dict(msg) for msg in request.messages],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
        "visitFromDelta": False,
    }

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                # Bug fix: bind a fallback timestamp before iterating. If the
                # upstream stream yields no lines at all, the 'stop' chunk
                # below would otherwise raise UnboundLocalError.
                timestamp = int(datetime.now().timestamp())
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        content = line
                        # Strip the upstream's 21-char version sentinel.
                        if content.startswith("$@$v=undefined-rv1$@$"):
                            content = content[21:]
                        cleaned_content = strip_model_prefix(content)
                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"

                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))
165
+
166
# Process a non-streaming chat request and return one complete completion.
async def process_non_streaming_response(request: ChatRequest):
    """Fetch a full chat completion from the upstream ``/api/chat`` endpoint.

    The upstream always streams, so the chunks are accumulated into a single
    response and returned as an OpenAI-style ``chat.completion`` dict.

    Raises:
        HTTPException: with the upstream status code on HTTP errors, or 500
            on transport/request failures.
    """
    chat_id = generate_chat_id()
    referer_url = get_referer_url(chat_id, request.model)
    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})

    # Browser-like headers; the upstream rejects requests without them.
    # NOTE: the previous revision also built an unused `headers_chat` dict
    # (next-action / next-router-state-tree) left over from a removed second
    # request — it has been dropped as dead code.
    headers_api_chat = {
        'accept': '*/*',
        'accept-language': 'en-US,en;q=0.9',
        'cache-control': 'no-cache',
        'origin': BASE_URL,
        'pragma': 'no-cache',
        'priority': 'u=1, i',
        'sec-ch-ua': '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/130.0.0.0 Safari/537.36',
        'Content-Type': 'application/json',
        'Referer': referer_url,  # Retain referer if necessary
    }

    if request.model == 'o1-preview':
        # Artificial delay for this model — presumably rate-limit evasion;
        # TODO confirm the intent with the service owner.
        delay_seconds = random.randint(20, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
        await asyncio.sleep(delay_seconds)

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        "id": chat_id,
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [message_to_dict(msg) for msg in request.messages],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
        "visitFromDelta": False,
    }

    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                # Consistency fix: match the streaming variant's timeout —
                # httpx's default (5 s) is too short for long generations.
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    # Strip the upstream's 21-char version sentinel.
    if full_response.startswith("$@$v=undefined-rv1$@$"):
        full_response = full_response[21:]

    cleaned_full_response = strip_model_prefix(full_response)

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }