Niansuh committed on
Commit
af82b40
·
verified ·
1 Parent(s): ffe19dc

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +241 -480
api/utils.py CHANGED
@@ -1,480 +1,241 @@
1
- from datetime import datetime
2
- import json
3
- from typing import Any, Dict, Optional
4
- import uuid
5
-
6
- import httpx
7
- from api.config import MODEL_MAPPING, headers, AGENT_MODE, TRENDING_AGENT_MODE, BASE_URL
8
- from fastapi import HTTPException
9
- from api.models import ChatRequest
10
-
11
- from api.logger import setup_logger
12
-
13
- logger = setup_logger(__name__)
14
-
15
-
16
- def create_chat_completion_data(
17
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
18
- ) -> Dict[str, Any]:
19
- return {
20
- "id": f"chatcmpl-{uuid.uuid4()}",
21
- "object": "chat.completion.chunk",
22
- "created": timestamp,
23
- "model": model,
24
- "choices": [
25
- {
26
- "index": 0,
27
- "delta": {"content": content, "role": "assistant"},
28
- "finish_reason": finish_reason,
29
- }
30
- ],
31
- "usage": None,
32
- }
33
-
34
-
35
- def message_to_dict(message):
36
- if isinstance(message.content, str):
37
- return {"role": message.role, "content": message.content}
38
- elif isinstance(message.content, list) and len(message.content) == 2:
39
- return {
40
- "role": message.role,
41
- "content": message.content[0]["text"],
42
- "data": {
43
- "imageBase64": message.content[1]["image_url"]["url"],
44
- "fileText": "",
45
- "title": "snapshot",
46
- },
47
- }
48
- else:
49
- return {"role": message.role, "content": message.content}
50
-
51
-
52
- async def process_streaming_response(request: ChatRequest):
53
- agent_mode = AGENT_MODE.get(request.model, {})
54
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
55
- json_data = {
56
- "messages": [message_to_dict(msg) for msg in request.messages],
57
- "previewToken": None,
58
- "userId": None,
59
- "codeModelMode": True,
60
- "agentMode": agent_mode,
61
- "trendingAgentMode": trending_agent_mode,
62
- "isMicMode": False,
63
- "userSystemPrompt": None,
64
- "maxTokens": request.max_tokens,
65
- "playgroundTopP": request.top_p,
66
- "playgroundTemperature": request.temperature,
67
- "isChromeExt": False,
68
- "githubToken": None,
69
- "clickedAnswer2": False,
70
- "clickedAnswer3": False,
71
- "clickedForceWebSearch": False,
72
- "visitFromDelta": False,
73
- "mobileClient": False,
74
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
75
- }
76
-
77
- async with httpx.AsyncClient() as client:
78
- try:
79
- async with client.stream(
80
- "POST",
81
- f"{BASE_URL}/api/chat",
82
- headers=headers,
83
- json=json_data,
84
- timeout=100,
85
- ) as response:
86
- response.raise_for_status()
87
- async for line in response.aiter_lines():
88
- timestamp = int(datetime.now().timestamp())
89
- if line:
90
- content = line
91
- if content.startswith("$@$v=undefined-rv1$@$"):
92
- content = content[21:]
93
- yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
94
-
95
- yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
96
- yield "data: [DONE]\n\n"
97
- except httpx.HTTPStatusError as e:
98
- logger.error(f"HTTP error occurred: {e}")
99
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
100
- except httpx.RequestError as e:
101
- logger.error(f"Error occurred during request: {e}")
102
- raise HTTPException(status_code=500, detail=str(e))
103
-
104
-
105
- async def process_non_streaming_response(request: ChatRequest):
106
- agent_mode = AGENT_MODE.get(request.model, {})
107
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
108
- json_data = {
109
- "messages": [message_to_dict(msg) for msg in request.messages],
110
- "previewToken": None,
111
- "userId": None,
112
- "codeModelMode": True,
113
- "agentMode": agent_mode,
114
- "trendingAgentMode": trending_agent_mode,
115
- "isMicMode": False,
116
- "userSystemPrompt": None,
117
- "maxTokens": request.max_tokens,
118
- "playgroundTopP": request.top_p,
119
- "playgroundTemperature": request.temperature,
120
- "isChromeExt": False,
121
- "githubToken": None,
122
- "clickedAnswer2": False,
123
- "clickedAnswer3": False,
124
- "clickedForceWebSearch": False,
125
- "visitFromDelta": False,
126
- "mobileClient": False,
127
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
128
- }
129
- full_response = ""
130
- async with httpx.AsyncClient() as client:
131
- try:
132
- async with client.stream(
133
- method="POST", url=f"{BASE_URL}/api/chat", headers=headers, json=json_data
134
- ) as response:
135
- response.raise_for_status()
136
- async for chunk in response.aiter_text():
137
- full_response += chunk
138
- except httpx.HTTPStatusError as e:
139
- logger.error(f"HTTP error occurred: {e}")
140
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
141
- except httpx.RequestError as e:
142
- logger.error(f"Error occurred during request: {e}")
143
- raise HTTPException(status_code=500, detail=str(e))
144
- if full_response.startswith("$@$v=undefined-rv1$@$"):
145
- full_response = full_response[21:]
146
-
147
- return {
148
- "id": f"chatcmpl-{uuid.uuid4()}",
149
- "object": "chat.completion",
150
- "created": int(datetime.now().timestamp()),
151
- "model": request.model,
152
- "choices": [
153
- {
154
- "index": 0,
155
- "message": {"role": "assistant", "content": full_response},
156
- "finish_reason": "stop",
157
- }
158
- ],
159
- "usage": None,
160
- }
161
- from datetime import datetime
162
- import json
163
- from typing import Any, Dict, Optional
164
- import uuid
165
-
166
- import httpx
167
- from api.config import MODEL_MAPPING, headers, AGENT_MODE, TRENDING_AGENT_MODE, BASE_URL
168
- from fastapi import HTTPException
169
- from api.models import ChatRequest
170
-
171
- from api.logger import setup_logger
172
-
173
- logger = setup_logger(__name__)
174
-
175
-
176
- def create_chat_completion_data(
177
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
178
- ) -> Dict[str, Any]:
179
- return {
180
- "id": f"chatcmpl-{uuid.uuid4()}",
181
- "object": "chat.completion.chunk",
182
- "created": timestamp,
183
- "model": model,
184
- "choices": [
185
- {
186
- "index": 0,
187
- "delta": {"content": content, "role": "assistant"},
188
- "finish_reason": finish_reason,
189
- }
190
- ],
191
- "usage": None,
192
- }
193
-
194
-
195
- def message_to_dict(message):
196
- if isinstance(message.content, str):
197
- return {"role": message.role, "content": message.content}
198
- elif isinstance(message.content, list) and len(message.content) == 2:
199
- return {
200
- "role": message.role,
201
- "content": message.content[0]["text"],
202
- "data": {
203
- "imageBase64": message.content[1]["image_url"]["url"],
204
- "fileText": "",
205
- "title": "snapshot",
206
- },
207
- }
208
- else:
209
- return {"role": message.role, "content": message.content}
210
-
211
-
212
- async def process_streaming_response(request: ChatRequest):
213
- agent_mode = AGENT_MODE.get(request.model, {})
214
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
215
- json_data = {
216
- "messages": [message_to_dict(msg) for msg in request.messages],
217
- "previewToken": None,
218
- "userId": None,
219
- "codeModelMode": True,
220
- "agentMode": agent_mode,
221
- "trendingAgentMode": trending_agent_mode,
222
- "isMicMode": False,
223
- "userSystemPrompt": None,
224
- "maxTokens": request.max_tokens,
225
- "playgroundTopP": request.top_p,
226
- "playgroundTemperature": request.temperature,
227
- "isChromeExt": False,
228
- "githubToken": None,
229
- "clickedAnswer2": False,
230
- "clickedAnswer3": False,
231
- "clickedForceWebSearch": False,
232
- "visitFromDelta": False,
233
- "mobileClient": False,
234
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
235
- }
236
-
237
- async with httpx.AsyncClient() as client:
238
- try:
239
- async with client.stream(
240
- "POST",
241
- f"{BASE_URL}/api/chat",
242
- headers=headers,
243
- json=json_data,
244
- timeout=100,
245
- ) as response:
246
- response.raise_for_status()
247
- async for line in response.aiter_lines():
248
- timestamp = int(datetime.now().timestamp())
249
- if line:
250
- content = line
251
- if content.startswith("$@$v=undefined-rv1$@$"):
252
- content = content[21:]
253
- yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
254
-
255
- yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
256
- yield "data: [DONE]\n\n"
257
- except httpx.HTTPStatusError as e:
258
- logger.error(f"HTTP error occurred: {e}")
259
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
260
- except httpx.RequestError as e:
261
- logger.error(f"Error occurred during request: {e}")
262
- raise HTTPException(status_code=500, detail=str(e))
263
-
264
-
265
- async def process_non_streaming_response(request: ChatRequest):
266
- agent_mode = AGENT_MODE.get(request.model, {})
267
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
268
- json_data = {
269
- "messages": [message_to_dict(msg) for msg in request.messages],
270
- "previewToken": None,
271
- "userId": None,
272
- "codeModelMode": True,
273
- "agentMode": agent_mode,
274
- "trendingAgentMode": trending_agent_mode,
275
- "isMicMode": False,
276
- "userSystemPrompt": None,
277
- "maxTokens": request.max_tokens,
278
- "playgroundTopP": request.top_p,
279
- "playgroundTemperature": request.temperature,
280
- "isChromeExt": False,
281
- "githubToken": None,
282
- "clickedAnswer2": False,
283
- "clickedAnswer3": False,
284
- "clickedForceWebSearch": False,
285
- "visitFromDelta": False,
286
- "mobileClient": False,
287
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
288
- }
289
- full_response = ""
290
- async with httpx.AsyncClient() as client:
291
- try:
292
- async with client.stream(
293
- method="POST", url=f"{BASE_URL}/api/chat", headers=headers, json=json_data
294
- ) as response:
295
- response.raise_for_status()
296
- async for chunk in response.aiter_text():
297
- full_response += chunk
298
- except httpx.HTTPStatusError as e:
299
- logger.error(f"HTTP error occurred: {e}")
300
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
301
- except httpx.RequestError as e:
302
- logger.error(f"Error occurred during request: {e}")
303
- raise HTTPException(status_code=500, detail=str(e))
304
- if full_response.startswith("$@$v=undefined-rv1$@$"):
305
- full_response = full_response[21:]
306
-
307
- return {
308
- "id": f"chatcmpl-{uuid.uuid4()}",
309
- "object": "chat.completion",
310
- "created": int(datetime.now().timestamp()),
311
- "model": request.model,
312
- "choices": [
313
- {
314
- "index": 0,
315
- "message": {"role": "assistant", "content": full_response},
316
- "finish_reason": "stop",
317
- }
318
- ],
319
- "usage": None,
320
- }
321
- from datetime import datetime
322
- import json
323
- from typing import Any, Dict, Optional
324
- import uuid
325
-
326
- import httpx
327
- from api.config import MODEL_MAPPING, headers, AGENT_MODE, TRENDING_AGENT_MODE, BASE_URL
328
- from fastapi import HTTPException
329
- from api.models import ChatRequest
330
-
331
- from api.logger import setup_logger
332
-
333
- logger = setup_logger(__name__)
334
-
335
-
336
- def create_chat_completion_data(
337
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
338
- ) -> Dict[str, Any]:
339
- return {
340
- "id": f"chatcmpl-{uuid.uuid4()}",
341
- "object": "chat.completion.chunk",
342
- "created": timestamp,
343
- "model": model,
344
- "choices": [
345
- {
346
- "index": 0,
347
- "delta": {"content": content, "role": "assistant"},
348
- "finish_reason": finish_reason,
349
- }
350
- ],
351
- "usage": None,
352
- }
353
-
354
-
355
- def message_to_dict(message):
356
- if isinstance(message.content, str):
357
- return {"role": message.role, "content": message.content}
358
- elif isinstance(message.content, list) and len(message.content) == 2:
359
- return {
360
- "role": message.role,
361
- "content": message.content[0]["text"],
362
- "data": {
363
- "imageBase64": message.content[1]["image_url"]["url"],
364
- "fileText": "",
365
- "title": "snapshot",
366
- },
367
- }
368
- else:
369
- return {"role": message.role, "content": message.content}
370
-
371
-
372
- async def process_streaming_response(request: ChatRequest):
373
- agent_mode = AGENT_MODE.get(request.model, {})
374
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
375
- json_data = {
376
- "messages": [message_to_dict(msg) for msg in request.messages],
377
- "previewToken": None,
378
- "userId": None,
379
- "codeModelMode": True,
380
- "agentMode": agent_mode,
381
- "trendingAgentMode": trending_agent_mode,
382
- "isMicMode": False,
383
- "userSystemPrompt": None,
384
- "maxTokens": request.max_tokens,
385
- "playgroundTopP": request.top_p,
386
- "playgroundTemperature": request.temperature,
387
- "isChromeExt": False,
388
- "githubToken": None,
389
- "clickedAnswer2": False,
390
- "clickedAnswer3": False,
391
- "clickedForceWebSearch": False,
392
- "visitFromDelta": False,
393
- "mobileClient": False,
394
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
395
- }
396
-
397
- async with httpx.AsyncClient() as client:
398
- try:
399
- async with client.stream(
400
- "POST",
401
- f"{BASE_URL}/api/chat",
402
- headers=headers,
403
- json=json_data,
404
- timeout=100,
405
- ) as response:
406
- response.raise_for_status()
407
- async for line in response.aiter_lines():
408
- timestamp = int(datetime.now().timestamp())
409
- if line:
410
- content = line
411
- if content.startswith("$@$v=undefined-rv1$@$"):
412
- content = content[21:]
413
- yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
414
-
415
- yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
416
- yield "data: [DONE]\n\n"
417
- except httpx.HTTPStatusError as e:
418
- logger.error(f"HTTP error occurred: {e}")
419
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
420
- except httpx.RequestError as e:
421
- logger.error(f"Error occurred during request: {e}")
422
- raise HTTPException(status_code=500, detail=str(e))
423
-
424
-
425
- async def process_non_streaming_response(request: ChatRequest):
426
- agent_mode = AGENT_MODE.get(request.model, {})
427
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
428
- json_data = {
429
- "messages": [message_to_dict(msg) for msg in request.messages],
430
- "previewToken": None,
431
- "userId": None,
432
- "codeModelMode": True,
433
- "agentMode": agent_mode,
434
- "trendingAgentMode": trending_agent_mode,
435
- "isMicMode": False,
436
- "userSystemPrompt": None,
437
- "maxTokens": request.max_tokens,
438
- "playgroundTopP": request.top_p,
439
- "playgroundTemperature": request.temperature,
440
- "isChromeExt": False,
441
- "githubToken": None,
442
- "clickedAnswer2": False,
443
- "clickedAnswer3": False,
444
- "clickedForceWebSearch": False,
445
- "visitFromDelta": False,
446
- "mobileClient": False,
447
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
448
- }
449
- full_response = ""
450
- async with httpx.AsyncClient() as client:
451
- try:
452
- async with client.stream(
453
- method="POST", url=f"{BASE_URL}/api/chat", headers=headers, json=json_data
454
- ) as response:
455
- response.raise_for_status()
456
- async for chunk in response.aiter_text():
457
- full_response += chunk
458
- except httpx.HTTPStatusError as e:
459
- logger.error(f"HTTP error occurred: {e}")
460
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
461
- except httpx.RequestError as e:
462
- logger.error(f"Error occurred during request: {e}")
463
- raise HTTPException(status_code=500, detail=str(e))
464
- if full_response.startswith("$@$v=undefined-rv1$@$"):
465
- full_response = full_response[21:]
466
-
467
- return {
468
- "id": f"chatcmpl-{uuid.uuid4()}",
469
- "object": "chat.completion",
470
- "created": int(datetime.now().timestamp()),
471
- "model": request.model,
472
- "choices": [
473
- {
474
- "index": 0,
475
- "message": {"role": "assistant", "content": full_response},
476
- "finish_reason": "stop",
477
- }
478
- ],
479
- "usage": None,
480
- }
 
1
+ from datetime import datetime
2
+ import json
3
+ from typing import Any, Dict, Optional
4
+ import uuid
5
+ import re
6
+
7
+ import httpx
8
+ from api.config import (
9
+ MODEL_MAPPING,
10
+ USER_SELECTED_MODEL,
11
+ MODEL_PREFIXES,
12
+ MODEL_REFERERS,
13
+ MODEL_ALIASES,
14
+ headers,
15
+ AGENT_MODE,
16
+ TRENDING_AGENT_MODE,
17
+ BASE_URL,
18
+ )
19
+ from fastapi import HTTPException
20
+ from api.models import ChatRequest
21
+ from api.logger import setup_logger
22
+
23
+ logger = setup_logger(__name__)
24
+
25
+
26
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Build one OpenAI-style ``chat.completion.chunk`` payload.

    Args:
        content: Delta text carried by this chunk.
        model: Model identifier echoed back to the client.
        timestamp: Unix timestamp (seconds) for the ``created`` field.
        finish_reason: ``"stop"`` on the terminal chunk, otherwise ``None``.
    """
    chunk_choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [chunk_choice],
        "usage": None,
    }
43
+
44
+
45
def message_to_dict(message):
    """Convert a request message into the upstream chat API's dict shape.

    String content maps to a plain role/content pair.  A two-element list is
    treated as ``[text_part, image_part]``: the text becomes ``content`` and
    the image URL travels in a ``data`` section.  Any other content shape is
    passed through unchanged.
    """
    content = message.content
    is_image_pair = isinstance(content, list) and len(content) == 2
    if not is_image_pair:
        return {"role": message.role, "content": content}
    return {
        "role": message.role,
        "content": content[0]["text"],
        "data": {
            "imageBase64": content[1]["image_url"]["url"],
            "fileText": "",
            "title": "snapshot",
        },
    }
60
+
61
+
62
def get_full_model_name(model: str) -> str:
    """Resolve a model alias to its canonical name; unknown names pass through."""
    if model in MODEL_ALIASES:
        return MODEL_ALIASES[model]
    return model
65
+
66
+
67
def get_model_prefix(model: str) -> str:
    """Return the prompt prefix configured for *model*, or '' when none is set."""
    if model in MODEL_PREFIXES:
        return MODEL_PREFIXES[model]
    return ""
69
+
70
+
71
def get_referer_url(model: str) -> str:
    """Build the absolute referer URL for *model*.

    Falls back to ``/?model=<name>`` when no explicit referer path is
    configured.  NOTE(review): this helper is not called anywhere in this
    module — confirm whether a caller elsewhere still needs it.
    """
    default_path = f"/?model={model}"
    path = MODEL_REFERERS.get(model, default_path)
    return BASE_URL + path
74
+
75
+
76
async def process_streaming_response(request: ChatRequest):
    """Stream a chat completion from the upstream API as SSE lines.

    Yields ``data: <json>\n\n`` chunks in the OpenAI streaming format, then a
    terminal ``stop`` chunk and the ``data: [DONE]`` sentinel.

    Args:
        request: Incoming chat request (model name, messages, sampling params).

    Raises:
        HTTPException: mirroring the upstream HTTP status on a 4xx/5xx reply,
            or 500 on a transport-level error.
    """
    model = get_full_model_name(request.model)
    agent_mode = AGENT_MODE.get(model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(model, {})

    # NOTE: the previous revision also built a ``formatted_prompt`` string and
    # a model prefix here, but neither value was ever used in the request —
    # that dead computation has been removed.
    json_data = {
        # Flatten each message: list-shaped content is assumed to be
        # [text_part, image_part]; the image URL travels in "data".
        "messages": [
            {
                "role": msg.role,
                "content": msg.content[0]["text"] if isinstance(msg.content, list) else msg.content,
                "data": msg.content[1]["image_url"]["url"] if isinstance(msg.content, list) and len(msg.content) == 2 else None,
            }
            for msg in request.messages
        ],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": USER_SELECTED_MODEL.get(model, model),
    }

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=headers,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        content = line
                        # Strip the upstream version marker, if present.
                        if content.startswith("$@$v=undefined-rv1$@$"):
                            content = content[21:]
                        yield f"data: {json.dumps(create_chat_completion_data(content, model, timestamp))}\n\n"

                # Terminal chunk plus the SSE end-of-stream sentinel.
                timestamp = int(datetime.now().timestamp())
                yield f"data: {json.dumps(create_chat_completion_data('', model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
155
+
156
+
157
async def process_non_streaming_response(request: ChatRequest):
    """Request a complete (non-streaming) chat completion from upstream.

    Args:
        request: Incoming chat request (model name, messages, sampling params).

    Returns:
        An OpenAI-style ``chat.completion`` response dict with a single choice.

    Raises:
        HTTPException: mirroring the upstream HTTP status on a 4xx/5xx reply,
            or 500 on a transport-level error.
    """
    model = get_full_model_name(request.model)
    agent_mode = AGENT_MODE.get(model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(model, {})

    # NOTE: the previous revision also built a ``formatted_prompt`` string and
    # a model prefix here, but neither value was ever used in the request —
    # that dead computation has been removed.
    json_data = {
        # Flatten each message: list-shaped content is assumed to be
        # [text_part, image_part]; the image URL travels in "data".
        "messages": [
            {
                "role": msg.role,
                "content": msg.content[0]["text"] if isinstance(msg.content, list) else msg.content,
                "data": msg.content[1]["image_url"]["url"] if isinstance(msg.content, list) and len(msg.content) == 2 else None,
            }
            for msg in request.messages
        ],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": USER_SELECTED_MODEL.get(model, model),
    }

    async with httpx.AsyncClient() as client:
        try:
            response = await client.post(
                f"{BASE_URL}/api/chat",
                headers=headers,
                json=json_data,
                timeout=100,
            )
            response.raise_for_status()
            full_response = response.text

            # Strip the upstream version marker, if present.
            if full_response.startswith("$@$v=undefined-rv1$@$"):
                full_response = full_response[21:]

            return {
                "id": f"chatcmpl-{uuid.uuid4()}",
                "object": "chat.completion",
                "created": int(datetime.now().timestamp()),
                "model": model,
                "choices": [
                    {
                        "index": 0,
                        "message": {"role": "assistant", "content": full_response},
                        "finish_reason": "stop",
                    }
                ],
                "usage": None,
            }
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))