Niansuh committed on
Commit
dd52fd4
·
verified ·
1 Parent(s): 0c6aac2

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +209 -198
api/utils.py CHANGED
@@ -1,198 +1,209 @@
1
- from datetime import datetime
2
- import json
3
- from typing import Any, Dict, Optional
4
-
5
- import httpx
6
- from api.config import (
7
- MODEL_MAPPING,
8
- headers,
9
- AGENT_MODE,
10
- TRENDING_AGENT_MODE,
11
- BASE_URL,
12
- MODEL_PREFIXES,
13
- MODEL_REFERERS
14
- )
15
- from fastapi import HTTPException
16
- from api.models import ChatRequest
17
-
18
- from api.logger import setup_logger
19
-
20
- import uuid # Added import for uuid
21
-
22
- logger = setup_logger(__name__)
23
-
24
- def create_chat_completion_data(
25
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
26
- ) -> Dict[str, Any]:
27
- return {
28
- "id": f"chatcmpl-{uuid.uuid4()}",
29
- "object": "chat.completion.chunk",
30
- "created": timestamp,
31
- "model": model,
32
- "choices": [
33
- {
34
- "index": 0,
35
- "delta": {"content": content, "role": "assistant"},
36
- "finish_reason": finish_reason,
37
- }
38
- ],
39
- "usage": None,
40
- }
41
-
42
- def message_to_dict(message, model_prefix: Optional[str] = None):
43
- if isinstance(message.content, str):
44
- content = message.content
45
- if model_prefix:
46
- content = f"{model_prefix} {content}"
47
- return {"role": message.role, "content": content}
48
- elif isinstance(message.content, list) and len(message.content) == 2:
49
- content = message.content[0]["text"]
50
- if model_prefix:
51
- content = f"{model_prefix} {content}"
52
- return {
53
- "role": message.role,
54
- "content": content,
55
- "data": {
56
- "imageBase64": message.content[1]["image_url"]["url"],
57
- "fileText": "",
58
- "title": "snapshot",
59
- },
60
- }
61
- else:
62
- return {"role": message.role, "content": message.content}
63
-
64
- def strip_model_prefix(content: str, model_prefix: str) -> str:
65
- """Remove the model prefix from the response content if present."""
66
- if content.startswith(model_prefix):
67
- return content[len(model_prefix):].strip()
68
- return content
69
-
70
- async def process_streaming_response(request: ChatRequest):
71
- agent_mode = AGENT_MODE.get(request.model, {})
72
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
73
- model_prefix = MODEL_PREFIXES.get(request.model, "")
74
- referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
75
- referer_url = f"{BASE_URL}{referer_path}"
76
-
77
- # Update headers with dynamic Referer
78
- dynamic_headers = headers.copy()
79
- dynamic_headers['Referer'] = referer_url
80
-
81
- json_data = {
82
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
83
- "previewToken": None,
84
- "userId": None,
85
- "codeModelMode": True,
86
- "agentMode": agent_mode,
87
- "trendingAgentMode": trending_agent_mode,
88
- "isMicMode": False,
89
- "userSystemPrompt": None,
90
- "maxTokens": request.max_tokens,
91
- "playgroundTopP": request.top_p,
92
- "playgroundTemperature": request.temperature,
93
- "isChromeExt": False,
94
- "githubToken": None,
95
- "clickedAnswer2": False,
96
- "clickedAnswer3": False,
97
- "clickedForceWebSearch": False,
98
- "visitFromDelta": False,
99
- "mobileClient": False,
100
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
101
- }
102
-
103
- async with httpx.AsyncClient() as client:
104
- try:
105
- async with client.stream(
106
- "POST",
107
- f"{BASE_URL}/api/chat",
108
- headers=dynamic_headers,
109
- json=json_data,
110
- timeout=100,
111
- ) as response:
112
- response.raise_for_status()
113
- async for line in response.aiter_lines():
114
- timestamp = int(datetime.now().timestamp())
115
- if line:
116
- content = line
117
- if content.startswith("$@$v=undefined-rv1$@$"):
118
- content = content[21:]
119
- # Strip the model prefix from the response content
120
- cleaned_content = strip_model_prefix(content, model_prefix)
121
- yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
122
-
123
- yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
124
- yield "data: [DONE]\n\n"
125
- except httpx.HTTPStatusError as e:
126
- logger.error(f"HTTP error occurred: {e}")
127
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
128
- except httpx.RequestError as e:
129
- logger.error(f"Error occurred during request: {e}")
130
- raise HTTPException(status_code=500, detail=str(e))
131
-
132
- async def process_non_streaming_response(request: ChatRequest):
133
- agent_mode = AGENT_MODE.get(request.model, {})
134
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
135
- model_prefix = MODEL_PREFIXES.get(request.model, "")
136
- referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
137
- referer_url = f"{BASE_URL}{referer_path}"
138
-
139
- # Update headers with dynamic Referer
140
- dynamic_headers = headers.copy()
141
- dynamic_headers['Referer'] = referer_url
142
-
143
- json_data = {
144
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
145
- "previewToken": None,
146
- "userId": None,
147
- "codeModelMode": True,
148
- "agentMode": agent_mode,
149
- "trendingAgentMode": trending_agent_mode,
150
- "isMicMode": False,
151
- "userSystemPrompt": None,
152
- "maxTokens": request.max_tokens,
153
- "playgroundTopP": request.top_p,
154
- "playgroundTemperature": request.temperature,
155
- "isChromeExt": False,
156
- "githubToken": None,
157
- "clickedAnswer2": False,
158
- "clickedAnswer3": False,
159
- "clickedForceWebSearch": False,
160
- "visitFromDelta": False,
161
- "mobileClient": False,
162
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
163
- }
164
- full_response = ""
165
- async with httpx.AsyncClient() as client:
166
- try:
167
- async with client.stream(
168
- method="POST", url=f"{BASE_URL}/api/chat", headers=dynamic_headers, json=json_data
169
- ) as response:
170
- response.raise_for_status()
171
- async for chunk in response.aiter_text():
172
- full_response += chunk
173
- except httpx.HTTPStatusError as e:
174
- logger.error(f"HTTP error occurred: {e}")
175
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
176
- except httpx.RequestError as e:
177
- logger.error(f"Error occurred during request: {e}")
178
- raise HTTPException(status_code=500, detail=str(e))
179
- if full_response.startswith("$@$v=undefined-rv1$@$"):
180
- full_response = full_response[21:]
181
-
182
- # Strip the model prefix from the full response
183
- cleaned_full_response = strip_model_prefix(full_response, model_prefix)
184
-
185
- return {
186
- "id": f"chatcmpl-{uuid.uuid4()}",
187
- "object": "chat.completion",
188
- "created": int(datetime.now().timestamp()),
189
- "model": request.model,
190
- "choices": [
191
- {
192
- "index": 0,
193
- "message": {"role": "assistant", "content": cleaned_full_response},
194
- "finish_reason": "stop",
195
- }
196
- ],
197
- "usage": None,
198
- }
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # api/utils.py
2
+
3
+ from datetime import datetime
4
+ import json
5
+ from typing import Any, Dict, Optional
6
+
7
+ import httpx
8
+ from api.config import (
9
+ MODEL_MAPPING,
10
+ headers,
11
+ AGENT_MODE,
12
+ TRENDING_AGENT_MODE,
13
+ BASE_URL,
14
+ MODEL_PREFIXES,
15
+ MODEL_REFERERS
16
+ )
17
+ from fastapi import HTTPException
18
+ from api.models import ChatRequest
19
+
20
+ from api.logger import setup_logger
21
+
22
+ import uuid # Added import for uuid
23
+
24
+ logger = setup_logger(__name__)
25
+
26
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Build one OpenAI-compatible ``chat.completion.chunk`` payload.

    Args:
        content: Text delta to place in the chunk.
        model: Model name echoed back to the client.
        timestamp: Unix timestamp for the ``created`` field.
        finish_reason: ``None`` for intermediate chunks, e.g. ``"stop"``
            for the terminating chunk.

    Returns:
        A dict shaped like an OpenAI streaming chunk, with a fresh
        ``chatcmpl-<uuid4>`` id and ``usage`` set to ``None``.
    """
    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": None,
    }
43
+
44
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Convert an incoming chat message into the upstream request format.

    Plain-string content is forwarded as-is (optionally prepended with
    ``model_prefix``).  A two-element list is treated as
    ``[text part, image part]`` and the image URL is forwarded as base64
    metadata under ``data``.  Any other content shape passes through
    unchanged.
    """
    content = message.content
    if isinstance(content, str):
        text = f"{model_prefix} {content}" if model_prefix else content
        return {"role": message.role, "content": text}
    if isinstance(content, list) and len(content) == 2:
        # Assumes content[0] carries the text and content[1] the image —
        # matches how callers construct vision messages; verify if new
        # content shapes are added.
        text = content[0]["text"]
        if model_prefix:
            text = f"{model_prefix} {text}"
        return {
            "role": message.role,
            "content": text,
            "data": {
                "imageBase64": content[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    return {"role": message.role, "content": content}
65
+
66
def strip_model_prefix(content: str, model_prefix: str) -> str:
    """Remove the model prefix and code-fence language specifier from content.

    Args:
        content: Raw response text from the upstream service.
        model_prefix: Prefix configured for the model (may be ``""``).

    Returns:
        The content with the prefix removed (if present) and any opening
        code fence normalized from e.g. ```` ```html ```` to ```` ``` ````.
    """
    # Only strip when a non-empty prefix actually matches. ``startswith("")``
    # is always True, so without this guard every chunk of a prefix-less
    # model would be ``.strip()``-ed, destroying significant leading
    # whitespace (e.g. code indentation in streamed lines).
    if model_prefix and content.startswith(model_prefix):
        content = content[len(model_prefix):].strip()

    # Remove language specifier from code blocks (e.g. ```html -> ```),
    # retaining the backticks and everything after the first newline.
    if content.startswith("```"):
        first_newline = content.find("\n")
        if first_newline != -1:
            content = "```\n" + content[first_newline + 1:]

    return content
80
+
81
async def process_streaming_response(request: ChatRequest):
    """Stream a chat completion from the upstream API as SSE ``data:`` lines.

    Yields OpenAI-compatible ``chat.completion.chunk`` events for each
    upstream line, then a final ``stop`` chunk and ``[DONE]``.

    Args:
        request: The incoming chat request (model, messages, sampling params).

    Raises:
        HTTPException: With the upstream status code on HTTP errors, or 500
            on transport errors.
    """
    # Marker the upstream service prepends to response lines; removed before
    # relaying content to the client.
    response_marker = "$@$v=undefined-rv1$@$"

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
    referer_url = f"{BASE_URL}{referer_path}"

    # Update headers with dynamic Referer for the requested model.
    dynamic_headers = headers.copy()
    dynamic_headers['Referer'] = referer_url

    json_data = {
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }

    # Bind up front so the final "stop" chunk has a timestamp even when the
    # upstream response contains no lines (previously an UnboundLocalError).
    timestamp = int(datetime.now().timestamp())

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=dynamic_headers,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        content = line
                        if content.startswith(response_marker):
                            content = content[len(response_marker):]
                        # Strip the model prefix and normalize code fences.
                        cleaned_content = strip_model_prefix(content, model_prefix)
                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"

                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
142
+
143
async def process_non_streaming_response(request: ChatRequest):
    """Return a complete (non-streaming) OpenAI-style chat completion.

    Aggregates the upstream streamed response into one message, strips the
    service marker and model prefix, and wraps the text in a
    ``chat.completion`` response dict.

    Args:
        request: The incoming chat request (model, messages, sampling params).

    Raises:
        HTTPException: With the upstream status code on HTTP errors, or 500
            on transport errors.
    """
    # Marker the upstream service prepends to responses; removed before
    # returning content to the client.
    response_marker = "$@$v=undefined-rv1$@$"

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
    referer_url = f"{BASE_URL}{referer_path}"

    # Update headers with dynamic Referer for the requested model.
    dynamic_headers = headers.copy()
    dynamic_headers['Referer'] = referer_url

    json_data = {
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }
    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=dynamic_headers,
                json=json_data,
                # Match the streaming path; httpx's default timeout (5s) is
                # too short for long completions.
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
    if full_response.startswith(response_marker):
        full_response = full_response[len(response_marker):]

    # Strip the model prefix and normalize code fences.
    cleaned_full_response = strip_model_prefix(full_response, model_prefix)

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }