Niansuh committed on
Commit
c3909c5
·
verified ·
1 Parent(s): 3c53238

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +189 -480
api/utils.py CHANGED
@@ -1,480 +1,189 @@
1
- from datetime import datetime
2
- import json
3
- from typing import Any, Dict, Optional
4
- import uuid
5
-
6
- import httpx
7
- from api.config import MODEL_MAPPING, headers, AGENT_MODE, TRENDING_AGENT_MODE, BASE_URL
8
- from fastapi import HTTPException
9
- from api.models import ChatRequest
10
-
11
- from api.logger import setup_logger
12
-
13
- logger = setup_logger(__name__)
14
-
15
-
16
- def create_chat_completion_data(
17
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
18
- ) -> Dict[str, Any]:
19
- return {
20
- "id": f"chatcmpl-{uuid.uuid4()}",
21
- "object": "chat.completion.chunk",
22
- "created": timestamp,
23
- "model": model,
24
- "choices": [
25
- {
26
- "index": 0,
27
- "delta": {"content": content, "role": "assistant"},
28
- "finish_reason": finish_reason,
29
- }
30
- ],
31
- "usage": None,
32
- }
33
-
34
-
35
- def message_to_dict(message):
36
- if isinstance(message.content, str):
37
- return {"role": message.role, "content": message.content}
38
- elif isinstance(message.content, list) and len(message.content) == 2:
39
- return {
40
- "role": message.role,
41
- "content": message.content[0]["text"],
42
- "data": {
43
- "imageBase64": message.content[1]["image_url"]["url"],
44
- "fileText": "",
45
- "title": "snapshot",
46
- },
47
- }
48
- else:
49
- return {"role": message.role, "content": message.content}
50
-
51
-
52
- async def process_streaming_response(request: ChatRequest):
53
- agent_mode = AGENT_MODE.get(request.model, {})
54
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
55
- json_data = {
56
- "messages": [message_to_dict(msg) for msg in request.messages],
57
- "previewToken": None,
58
- "userId": None,
59
- "codeModelMode": True,
60
- "agentMode": agent_mode,
61
- "trendingAgentMode": trending_agent_mode,
62
- "isMicMode": False,
63
- "userSystemPrompt": None,
64
- "maxTokens": request.max_tokens,
65
- "playgroundTopP": request.top_p,
66
- "playgroundTemperature": request.temperature,
67
- "isChromeExt": False,
68
- "githubToken": None,
69
- "clickedAnswer2": False,
70
- "clickedAnswer3": False,
71
- "clickedForceWebSearch": False,
72
- "visitFromDelta": False,
73
- "mobileClient": False,
74
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
75
- }
76
-
77
- async with httpx.AsyncClient() as client:
78
- try:
79
- async with client.stream(
80
- "POST",
81
- f"{BASE_URL}/api/chat",
82
- headers=headers,
83
- json=json_data,
84
- timeout=100,
85
- ) as response:
86
- response.raise_for_status()
87
- async for line in response.aiter_lines():
88
- timestamp = int(datetime.now().timestamp())
89
- if line:
90
- content = line
91
- if content.startswith("$@$v=undefined-rv1$@$"):
92
- content = content[21:]
93
- yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
94
-
95
- yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
96
- yield "data: [DONE]\n\n"
97
- except httpx.HTTPStatusError as e:
98
- logger.error(f"HTTP error occurred: {e}")
99
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
100
- except httpx.RequestError as e:
101
- logger.error(f"Error occurred during request: {e}")
102
- raise HTTPException(status_code=500, detail=str(e))
103
-
104
-
105
- async def process_non_streaming_response(request: ChatRequest):
106
- agent_mode = AGENT_MODE.get(request.model, {})
107
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
108
- json_data = {
109
- "messages": [message_to_dict(msg) for msg in request.messages],
110
- "previewToken": None,
111
- "userId": None,
112
- "codeModelMode": True,
113
- "agentMode": agent_mode,
114
- "trendingAgentMode": trending_agent_mode,
115
- "isMicMode": False,
116
- "userSystemPrompt": None,
117
- "maxTokens": request.max_tokens,
118
- "playgroundTopP": request.top_p,
119
- "playgroundTemperature": request.temperature,
120
- "isChromeExt": False,
121
- "githubToken": None,
122
- "clickedAnswer2": False,
123
- "clickedAnswer3": False,
124
- "clickedForceWebSearch": False,
125
- "visitFromDelta": False,
126
- "mobileClient": False,
127
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
128
- }
129
- full_response = ""
130
- async with httpx.AsyncClient() as client:
131
- try:
132
- async with client.stream(
133
- method="POST", url=f"{BASE_URL}/api/chat", headers=headers, json=json_data
134
- ) as response:
135
- response.raise_for_status()
136
- async for chunk in response.aiter_text():
137
- full_response += chunk
138
- except httpx.HTTPStatusError as e:
139
- logger.error(f"HTTP error occurred: {e}")
140
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
141
- except httpx.RequestError as e:
142
- logger.error(f"Error occurred during request: {e}")
143
- raise HTTPException(status_code=500, detail=str(e))
144
- if full_response.startswith("$@$v=undefined-rv1$@$"):
145
- full_response = full_response[21:]
146
-
147
- return {
148
- "id": f"chatcmpl-{uuid.uuid4()}",
149
- "object": "chat.completion",
150
- "created": int(datetime.now().timestamp()),
151
- "model": request.model,
152
- "choices": [
153
- {
154
- "index": 0,
155
- "message": {"role": "assistant", "content": full_response},
156
- "finish_reason": "stop",
157
- }
158
- ],
159
- "usage": None,
160
- }
161
- from datetime import datetime
162
- import json
163
- from typing import Any, Dict, Optional
164
- import uuid
165
-
166
- import httpx
167
- from api.config import MODEL_MAPPING, headers, AGENT_MODE, TRENDING_AGENT_MODE, BASE_URL
168
- from fastapi import HTTPException
169
- from api.models import ChatRequest
170
-
171
- from api.logger import setup_logger
172
-
173
- logger = setup_logger(__name__)
174
-
175
-
176
- def create_chat_completion_data(
177
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
178
- ) -> Dict[str, Any]:
179
- return {
180
- "id": f"chatcmpl-{uuid.uuid4()}",
181
- "object": "chat.completion.chunk",
182
- "created": timestamp,
183
- "model": model,
184
- "choices": [
185
- {
186
- "index": 0,
187
- "delta": {"content": content, "role": "assistant"},
188
- "finish_reason": finish_reason,
189
- }
190
- ],
191
- "usage": None,
192
- }
193
-
194
-
195
- def message_to_dict(message):
196
- if isinstance(message.content, str):
197
- return {"role": message.role, "content": message.content}
198
- elif isinstance(message.content, list) and len(message.content) == 2:
199
- return {
200
- "role": message.role,
201
- "content": message.content[0]["text"],
202
- "data": {
203
- "imageBase64": message.content[1]["image_url"]["url"],
204
- "fileText": "",
205
- "title": "snapshot",
206
- },
207
- }
208
- else:
209
- return {"role": message.role, "content": message.content}
210
-
211
-
212
- async def process_streaming_response(request: ChatRequest):
213
- agent_mode = AGENT_MODE.get(request.model, {})
214
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
215
- json_data = {
216
- "messages": [message_to_dict(msg) for msg in request.messages],
217
- "previewToken": None,
218
- "userId": None,
219
- "codeModelMode": True,
220
- "agentMode": agent_mode,
221
- "trendingAgentMode": trending_agent_mode,
222
- "isMicMode": False,
223
- "userSystemPrompt": None,
224
- "maxTokens": request.max_tokens,
225
- "playgroundTopP": request.top_p,
226
- "playgroundTemperature": request.temperature,
227
- "isChromeExt": False,
228
- "githubToken": None,
229
- "clickedAnswer2": False,
230
- "clickedAnswer3": False,
231
- "clickedForceWebSearch": False,
232
- "visitFromDelta": False,
233
- "mobileClient": False,
234
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
235
- }
236
-
237
- async with httpx.AsyncClient() as client:
238
- try:
239
- async with client.stream(
240
- "POST",
241
- f"{BASE_URL}/api/chat",
242
- headers=headers,
243
- json=json_data,
244
- timeout=100,
245
- ) as response:
246
- response.raise_for_status()
247
- async for line in response.aiter_lines():
248
- timestamp = int(datetime.now().timestamp())
249
- if line:
250
- content = line
251
- if content.startswith("$@$v=undefined-rv1$@$"):
252
- content = content[21:]
253
- yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
254
-
255
- yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
256
- yield "data: [DONE]\n\n"
257
- except httpx.HTTPStatusError as e:
258
- logger.error(f"HTTP error occurred: {e}")
259
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
260
- except httpx.RequestError as e:
261
- logger.error(f"Error occurred during request: {e}")
262
- raise HTTPException(status_code=500, detail=str(e))
263
-
264
-
265
- async def process_non_streaming_response(request: ChatRequest):
266
- agent_mode = AGENT_MODE.get(request.model, {})
267
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
268
- json_data = {
269
- "messages": [message_to_dict(msg) for msg in request.messages],
270
- "previewToken": None,
271
- "userId": None,
272
- "codeModelMode": True,
273
- "agentMode": agent_mode,
274
- "trendingAgentMode": trending_agent_mode,
275
- "isMicMode": False,
276
- "userSystemPrompt": None,
277
- "maxTokens": request.max_tokens,
278
- "playgroundTopP": request.top_p,
279
- "playgroundTemperature": request.temperature,
280
- "isChromeExt": False,
281
- "githubToken": None,
282
- "clickedAnswer2": False,
283
- "clickedAnswer3": False,
284
- "clickedForceWebSearch": False,
285
- "visitFromDelta": False,
286
- "mobileClient": False,
287
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
288
- }
289
- full_response = ""
290
- async with httpx.AsyncClient() as client:
291
- try:
292
- async with client.stream(
293
- method="POST", url=f"{BASE_URL}/api/chat", headers=headers, json=json_data
294
- ) as response:
295
- response.raise_for_status()
296
- async for chunk in response.aiter_text():
297
- full_response += chunk
298
- except httpx.HTTPStatusError as e:
299
- logger.error(f"HTTP error occurred: {e}")
300
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
301
- except httpx.RequestError as e:
302
- logger.error(f"Error occurred during request: {e}")
303
- raise HTTPException(status_code=500, detail=str(e))
304
- if full_response.startswith("$@$v=undefined-rv1$@$"):
305
- full_response = full_response[21:]
306
-
307
- return {
308
- "id": f"chatcmpl-{uuid.uuid4()}",
309
- "object": "chat.completion",
310
- "created": int(datetime.now().timestamp()),
311
- "model": request.model,
312
- "choices": [
313
- {
314
- "index": 0,
315
- "message": {"role": "assistant", "content": full_response},
316
- "finish_reason": "stop",
317
- }
318
- ],
319
- "usage": None,
320
- }
321
- from datetime import datetime
322
- import json
323
- from typing import Any, Dict, Optional
324
- import uuid
325
-
326
- import httpx
327
- from api.config import MODEL_MAPPING, headers, AGENT_MODE, TRENDING_AGENT_MODE, BASE_URL
328
- from fastapi import HTTPException
329
- from api.models import ChatRequest
330
-
331
- from api.logger import setup_logger
332
-
333
- logger = setup_logger(__name__)
334
-
335
-
336
- def create_chat_completion_data(
337
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
338
- ) -> Dict[str, Any]:
339
- return {
340
- "id": f"chatcmpl-{uuid.uuid4()}",
341
- "object": "chat.completion.chunk",
342
- "created": timestamp,
343
- "model": model,
344
- "choices": [
345
- {
346
- "index": 0,
347
- "delta": {"content": content, "role": "assistant"},
348
- "finish_reason": finish_reason,
349
- }
350
- ],
351
- "usage": None,
352
- }
353
-
354
-
355
- def message_to_dict(message):
356
- if isinstance(message.content, str):
357
- return {"role": message.role, "content": message.content}
358
- elif isinstance(message.content, list) and len(message.content) == 2:
359
- return {
360
- "role": message.role,
361
- "content": message.content[0]["text"],
362
- "data": {
363
- "imageBase64": message.content[1]["image_url"]["url"],
364
- "fileText": "",
365
- "title": "snapshot",
366
- },
367
- }
368
- else:
369
- return {"role": message.role, "content": message.content}
370
-
371
-
372
- async def process_streaming_response(request: ChatRequest):
373
- agent_mode = AGENT_MODE.get(request.model, {})
374
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
375
- json_data = {
376
- "messages": [message_to_dict(msg) for msg in request.messages],
377
- "previewToken": None,
378
- "userId": None,
379
- "codeModelMode": True,
380
- "agentMode": agent_mode,
381
- "trendingAgentMode": trending_agent_mode,
382
- "isMicMode": False,
383
- "userSystemPrompt": None,
384
- "maxTokens": request.max_tokens,
385
- "playgroundTopP": request.top_p,
386
- "playgroundTemperature": request.temperature,
387
- "isChromeExt": False,
388
- "githubToken": None,
389
- "clickedAnswer2": False,
390
- "clickedAnswer3": False,
391
- "clickedForceWebSearch": False,
392
- "visitFromDelta": False,
393
- "mobileClient": False,
394
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
395
- }
396
-
397
- async with httpx.AsyncClient() as client:
398
- try:
399
- async with client.stream(
400
- "POST",
401
- f"{BASE_URL}/api/chat",
402
- headers=headers,
403
- json=json_data,
404
- timeout=100,
405
- ) as response:
406
- response.raise_for_status()
407
- async for line in response.aiter_lines():
408
- timestamp = int(datetime.now().timestamp())
409
- if line:
410
- content = line
411
- if content.startswith("$@$v=undefined-rv1$@$"):
412
- content = content[21:]
413
- yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
414
-
415
- yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
416
- yield "data: [DONE]\n\n"
417
- except httpx.HTTPStatusError as e:
418
- logger.error(f"HTTP error occurred: {e}")
419
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
420
- except httpx.RequestError as e:
421
- logger.error(f"Error occurred during request: {e}")
422
- raise HTTPException(status_code=500, detail=str(e))
423
-
424
-
425
- async def process_non_streaming_response(request: ChatRequest):
426
- agent_mode = AGENT_MODE.get(request.model, {})
427
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
428
- json_data = {
429
- "messages": [message_to_dict(msg) for msg in request.messages],
430
- "previewToken": None,
431
- "userId": None,
432
- "codeModelMode": True,
433
- "agentMode": agent_mode,
434
- "trendingAgentMode": trending_agent_mode,
435
- "isMicMode": False,
436
- "userSystemPrompt": None,
437
- "maxTokens": request.max_tokens,
438
- "playgroundTopP": request.top_p,
439
- "playgroundTemperature": request.temperature,
440
- "isChromeExt": False,
441
- "githubToken": None,
442
- "clickedAnswer2": False,
443
- "clickedAnswer3": False,
444
- "clickedForceWebSearch": False,
445
- "visitFromDelta": False,
446
- "mobileClient": False,
447
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
448
- }
449
- full_response = ""
450
- async with httpx.AsyncClient() as client:
451
- try:
452
- async with client.stream(
453
- method="POST", url=f"{BASE_URL}/api/chat", headers=headers, json=json_data
454
- ) as response:
455
- response.raise_for_status()
456
- async for chunk in response.aiter_text():
457
- full_response += chunk
458
- except httpx.HTTPStatusError as e:
459
- logger.error(f"HTTP error occurred: {e}")
460
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
461
- except httpx.RequestError as e:
462
- logger.error(f"Error occurred during request: {e}")
463
- raise HTTPException(status_code=500, detail=str(e))
464
- if full_response.startswith("$@$v=undefined-rv1$@$"):
465
- full_response = full_response[21:]
466
-
467
- return {
468
- "id": f"chatcmpl-{uuid.uuid4()}",
469
- "object": "chat.completion",
470
- "created": int(datetime.now().timestamp()),
471
- "model": request.model,
472
- "choices": [
473
- {
474
- "index": 0,
475
- "message": {"role": "assistant", "content": full_response},
476
- "finish_reason": "stop",
477
- }
478
- ],
479
- "usage": None,
480
- }
 
1
+ # api/utils.py
2
+
3
+ from datetime import datetime
4
+ import json
5
+ from typing import Any, Dict, Optional
6
+
7
+ import httpx
8
+ from api.config import (
9
+ MODEL_MAPPING,
10
+ headers,
11
+ AGENT_MODE,
12
+ TRENDING_AGENT_MODE,
13
+ BASE_URL,
14
+ MODEL_PREFIXES,
15
+ MODEL_REFERERS
16
+ )
17
+ from fastapi import HTTPException
18
+ from api.models import ChatRequest
19
+
20
+ from api.logger import setup_logger
21
+
22
+ import uuid # Added import for uuid
23
+
24
+ logger = setup_logger(__name__)
25
+
26
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Build one OpenAI-style ``chat.completion.chunk`` payload.

    Args:
        content: Delta text carried by this chunk.
        model: Model identifier echoed back to the client.
        timestamp: Unix timestamp (seconds) for the ``created`` field.
        finish_reason: ``None`` for intermediate chunks, ``"stop"`` on the last one.

    Returns:
        A dict matching the OpenAI streaming-chunk schema (``usage`` is always None).
    """
    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": None,
    }
43
+
44
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Convert a chat message object into the upstream API's dict shape.

    String content is passed through (optionally prefixed with
    ``model_prefix``). A two-element list is treated as ``[text part,
    image part]`` and the image URL is forwarded under ``data.imageBase64``.
    Any other content shape is returned untouched.
    """
    raw = message.content
    if isinstance(raw, str):
        text = f"{model_prefix} {raw}" if model_prefix else raw
        return {"role": message.role, "content": text}
    if isinstance(raw, list) and len(raw) == 2:
        text = raw[0]["text"]
        if model_prefix:
            text = f"{model_prefix} {text}"
        return {
            "role": message.role,
            "content": text,
            "data": {
                "imageBase64": raw[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    # Unrecognized content shapes: forward as-is, no prefixing.
    return {"role": message.role, "content": raw}
65
+
66
async def process_streaming_response(request: ChatRequest):
    """Stream a chat completion from the upstream API as SSE lines.

    Yields ``data: {json}\n\n`` strings in OpenAI chunk format, then a final
    ``stop`` chunk and ``data: [DONE]``.

    Raises:
        HTTPException: with the upstream status code on HTTP errors, or 500
            on transport errors. (Note: raised from inside a generator, so it
            only reaches FastAPI's handler if raised before the first yield.)
    """
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
    referer_url = f"{BASE_URL}{referer_path}"

    # Per-model Referer so the upstream attributes the request correctly.
    dynamic_headers = headers.copy()
    dynamic_headers['Referer'] = referer_url

    json_data = {
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }

    # Marker the upstream prepends to some response lines; strip by its
    # actual length instead of the magic constant 21.
    prefix = "$@$v=undefined-rv1$@$"

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=dynamic_headers,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                # Initialize before the loop: the original referenced
                # `timestamp` after the loop, raising NameError when the
                # upstream returned zero lines.
                timestamp = int(datetime.now().timestamp())
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        content = line
                        if content.startswith(prefix):
                            content = content[len(prefix):]
                        yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"

                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
125
+
126
async def process_non_streaming_response(request: ChatRequest):
    """Fetch a complete chat completion from the upstream API.

    Accumulates the upstream stream into one string and returns it as an
    OpenAI-style ``chat.completion`` response dict.

    Raises:
        HTTPException: with the upstream status code on HTTP errors, or 500
            on transport errors.
    """
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
    referer_url = f"{BASE_URL}{referer_path}"

    # Per-model Referer so the upstream attributes the request correctly.
    dynamic_headers = headers.copy()
    dynamic_headers['Referer'] = referer_url

    json_data = {
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }
    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=dynamic_headers,
                json=json_data,
                # Match the streaming variant; httpx's 5 s default would
                # abort long completions.
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    # Marker the upstream prepends to some responses; strip by its actual
    # length instead of the magic constant 21.
    prefix = "$@$v=undefined-rv1$@$"
    if full_response.startswith(prefix):
        full_response = full_response[len(prefix):]

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }