Update api/utils.py

api/utils.py (CHANGED, +72 -26)
@@ -1,6 +1,8 @@
+# api/utils.py
+
 from datetime import datetime
 import json
-from typing import Any, Dict, Optional
+from typing import Any, Dict, Optional
 
 import httpx
 from api.config import (
@@ -13,17 +15,66 @@ from api.config import (
     MODEL_REFERERS
 )
 from fastapi import HTTPException
-from api.models import ChatRequest
+from api.models import ChatRequest
 
 from api.logger import setup_logger
 
-import uuid
+import uuid
 
 logger = setup_logger(__name__)
 
+def create_chat_completion_data(
+    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
+) -> Dict[str, Any]:
+    return {
+        "id": f"chatcmpl-{uuid.uuid4()}",
+        "object": "chat.completion.chunk",
+        "created": timestamp,
+        "model": model,
+        "choices": [
+            {
+                "index": 0,
+                "delta": {"content": content, "role": "assistant"},
+                "finish_reason": finish_reason,
+            }
+        ],
+        "usage": None,
+    }
+
+def message_to_dict(message, model_prefix: Optional[str] = None):
+    if isinstance(message.content, str):
+        content = message.content
+        if model_prefix:
+            content = f"{model_prefix} {content}"
+        return {"role": message.role, "content": content}
+    elif isinstance(message.content, list) and len(message.content) == 2:
+        content = message.content[0]["text"]
+        if model_prefix:
+            content = f"{model_prefix} {content}"
+        return {
+            "role": message.role,
+            "content": content,
+            "data": {
+                "imageBase64": message.content[1]["image_url"]["url"],
+                "fileText": "",
+                "title": "snapshot",
+            },
+        }
+    else:
+        return {"role": message.role, "content": message.content}
+
+def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
+    """Remove the model prefix from the response content if present."""
+    if model_prefix and content.startswith(model_prefix):
+        logger.debug(f"Stripping prefix '{model_prefix}' from content.")
+        return content[len(model_prefix):].strip()
+    logger.debug("No prefix to strip from content.")
+    return content
+
 async def process_streaming_response(request: ChatRequest):
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
+    model_prefix = MODEL_PREFIXES.get(request.model, "")
     referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
     referer_url = f"{BASE_URL}{referer_path}"
 
@@ -31,11 +82,8 @@ async def process_streaming_response(request: ChatRequest):
     dynamic_headers = headers.copy()
     dynamic_headers['Referer'] = referer_url
 
-    # Convert Message objects to dictionaries
-    messages = [msg.dict() for msg in request.messages]
-
     json_data = {
-        "messages": messages,
+        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
         "previewToken": None,
         "userId": None,
         "codeModelMode": True,
@@ -72,10 +120,12 @@ async def process_streaming_response(request: ChatRequest):
                         content = line
                         if content.startswith("$@$v=undefined-rv1$@$"):
                             content = content[21:]
-                        # …
-
+                        # Strip the model prefix from the response content
+                        cleaned_content = strip_model_prefix(content, model_prefix)
+                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
 
-                        yield f"data: …
+            yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
+            yield "data: [DONE]\n\n"
         except httpx.HTTPStatusError as e:
             logger.error(f"HTTP error occurred: {e}")
             raise HTTPException(status_code=e.response.status_code, detail=str(e))
@@ -83,10 +133,10 @@ async def process_streaming_response(request: ChatRequest):
             logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
 
-
 async def process_non_streaming_response(request: ChatRequest):
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
+    model_prefix = MODEL_PREFIXES.get(request.model, "")
     referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
     referer_url = f"{BASE_URL}{referer_path}"
 
@@ -94,11 +144,8 @@ async def process_non_streaming_response(request: ChatRequest):
     dynamic_headers = headers.copy()
     dynamic_headers['Referer'] = referer_url
 
-    # Convert Message objects to dictionaries
-    messages = [msg.dict() for msg in request.messages]
-
     json_data = {
-        "messages": messages,
+        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
         "previewToken": None,
         "userId": None,
         "codeModelMode": True,
@@ -121,25 +168,24 @@ async def process_non_streaming_response(request: ChatRequest):
     full_response = ""
     async with httpx.AsyncClient() as client:
         try:
-            response = await client.post(
-                f"{BASE_URL}/api/chat",
-                headers=dynamic_headers,
-                json=json_data,
-                …
-            )
-            response.raise_for_status()
-            full_response = response.text  # Get the response text
+            async with client.stream(
+                method="POST", url=f"{BASE_URL}/api/chat", headers=dynamic_headers, json=json_data
+            ) as response:
+                response.raise_for_status()
+                async for chunk in response.aiter_text():
+                    full_response += chunk
         except httpx.HTTPStatusError as e:
             logger.error(f"HTTP error occurred: {e}")
             raise HTTPException(status_code=e.response.status_code, detail=str(e))
         except httpx.RequestError as e:
             logger.error(f"Error occurred during request: {e}")
             raise HTTPException(status_code=500, detail=str(e))
-
     if full_response.startswith("$@$v=undefined-rv1$@$"):
         full_response = full_response[21:]
 
-    # …
+    # Strip the model prefix from the full response
+    cleaned_full_response = strip_model_prefix(full_response, model_prefix)
+
     return {
         "id": f"chatcmpl-{uuid.uuid4()}",
         "object": "chat.completion",
@@ -148,7 +194,7 @@ async def process_non_streaming_response(request: ChatRequest):
         "choices": [
             {
                 "index": 0,
-                "message": {"role": "assistant", "content": full_response},
+                "message": {"role": "assistant", "content": cleaned_full_response},
                 "finish_reason": "stop",
            }
         ],
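For reference, every line the streaming handler now emits is one OpenAI-style chat.completion.chunk built by create_chat_completion_data. A minimal sketch of the payload shape, with the helper copied from the diff; the model name and timestamp below are placeholders, not values from this commit:

import json
import time
import uuid
from typing import Any, Dict, Optional

# Copied from the diff above: builds one OpenAI-style streaming chunk.
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [
            {
                "index": 0,
                "delta": {"content": content, "role": "assistant"},
                "finish_reason": finish_reason,
            }
        ],
        "usage": None,
    }

# "some-model" is a placeholder; the real code passes request.model and a
# timestamp captured where the chunk is built.
print(json.dumps(create_chat_completion_data("Hello", "some-model", int(time.time())), indent=2))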
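strip_model_prefix only rewrites the content when it actually starts with the configured prefix, so replies that never echoed the prefix pass through unchanged. A quick check of that behavior; logging is omitted and the "@GPT-4o" prefix is again a stand-in:

from typing import Optional

# Copied from the diff above, minus the debug logging.
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    if model_prefix and content.startswith(model_prefix):
        return content[len(model_prefix):].strip()
    return content

assert strip_model_prefix("@GPT-4o Hello there", "@GPT-4o") == "Hello there"
assert strip_model_prefix("Hello there", "@GPT-4o") == "Hello there"  # no prefix present
assert strip_model_prefix("Hello there", "") == "Hello there"         # empty prefix is a no-op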
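The non-streaming path likewise switched from a single client.post(...) plus response.text to client.stream(...), aggregating the body chunk by chunk. The same httpx pattern in isolation, assuming only a generic endpoint (the URL here is hypothetical; the real code posts json_data to f"{BASE_URL}/api/chat"):

import asyncio
import httpx

async def fetch_full_body(url: str, payload: dict) -> str:
    # Same aggregation pattern as the new non-streaming path: stream the
    # body and concatenate text chunks instead of reading response.text.
    full_response = ""
    async with httpx.AsyncClient() as client:
        async with client.stream("POST", url, json=payload) as response:
            response.raise_for_status()
            async for chunk in response.aiter_text():
                full_response += chunk
    return full_response

# body = asyncio.run(fetch_full_body("https://example.com/api/chat", {"messages": []}))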