Upload 13 files
- api/app.py +40 -42
- api/auth.py +10 -12
- api/logger.py +20 -23
- api/routes.py +59 -104
- api/utils.py +198 -202
- main.py +5 -7
- requirements.txt +6 -7
api/app.py
CHANGED
@@ -1,42 +1,40 @@

from fastapi import FastAPI, Request
from starlette.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from api.logger import setup_logger
from api.routes import router

logger = setup_logger(__name__)

def create_app():
    app = FastAPI(
        title="NiansuhAI API Gateway",
        docs_url=None,      # Disable Swagger UI
        redoc_url=None,     # Disable ReDoc
        openapi_url=None,   # Disable OpenAPI schema
    )

    # CORS settings
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],  # Adjust as needed for security
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # Include routes
    app.include_router(router)

    # Global exception handler for better error reporting
    @app.exception_handler(Exception)
    async def global_exception_handler(request: Request, exc: Exception):
        logger.error(f"An error occurred: {str(exc)}")
        return JSONResponse(
            status_code=500,
            content={"message": "An internal server error occurred."},
        )

    return app

app = create_app()
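A quick way to sanity-check the factory after this change is to build the app and list its registered routes. This is an illustrative sketch, assuming the repository root is on PYTHONPATH so that the api package is importable.

# Smoke test for create_app(); run from the repository root.
from api.app import create_app

app = create_app()
for route in app.routes:
    # Prints the mounted paths, confirming api.routes.router was included.
    print(getattr(route, "path", route))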
api/auth.py
CHANGED
@@ -1,12 +1,10 @@

from fastapi import Depends, HTTPException
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from api.config import APP_SECRET

security = HTTPBearer()

def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(security)):
    if credentials.credentials != APP_SECRET:
        raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
    return credentials.credentials
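verify_app_secret reads the standard Bearer scheme, so callers must send an Authorization header. A hedged client sketch of a rejected call is below; the URL, token, and model id are placeholders, not values from api.config.

# Hypothetical call with a wrong token; verify_app_secret should answer
# 403 "Invalid APP_SECRET" (assuming the body validates against ChatRequest).
import httpx

resp = httpx.post(
    "http://localhost:8001/v1/chat/completions",      # port from main.py
    headers={"Authorization": "Bearer wrong-token"},   # placeholder token
    json={"model": "placeholder-model",
          "messages": [{"role": "user", "content": "hi"}]},
)
print(resp.status_code, resp.text)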
api/logger.py
CHANGED
@@ -1,23 +1,20 @@

import logging

def setup_logger(name):
    logger = logging.getLogger(name)
    if not logger.handlers:
        logger.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        # Console handler
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)

        # File Handler - Error Level
        # error_file_handler = logging.FileHandler('error.log')
        # error_file_handler.setFormatter(formatter)
        # error_file_handler.setLevel(logging.ERROR)
        # logger.addHandler(error_file_handler)

    return logger
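Because setup_logger only attaches handlers when the logger has none, repeated calls with the same name reuse the first configuration instead of duplicating console output. A minimal sketch:

from api.logger import setup_logger

log_a = setup_logger("demo")
log_b = setup_logger("demo")       # same underlying logging.Logger object
assert log_a is log_b
assert len(log_a.handlers) == 1    # console handler added only once
log_a.info("configured once, logged once")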
api/routes.py
CHANGED
@@ -1,104 +1,59 @@

(Removed in this change: the GizAI-specific router, gizai_router, whose /v1/chat/completions endpoint validated models against GizAI.models and streamed from GizAI.create_async_generator. It is replaced by the generic routes below.)

import json
from fastapi import APIRouter, Depends, HTTPException, Request, Response
from fastapi.responses import StreamingResponse
from api.auth import verify_app_secret
from api.config import ALLOWED_MODELS
from api.models import ChatRequest
from api.utils import process_non_streaming_response, process_streaming_response
from api.logger import setup_logger

logger = setup_logger(__name__)

router = APIRouter()

@router.options("/v1/chat/completions")
@router.options("/api/v1/chat/completions")
async def chat_completions_options():
    return Response(
        status_code=200,
        headers={
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Methods": "POST, OPTIONS",
            "Access-Control-Allow-Headers": "Content-Type, Authorization",
        },
    )

@router.get("/v1/models")
@router.get("/api/v1/models")
async def list_models():
    return {"object": "list", "data": ALLOWED_MODELS}

@router.post("/v1/chat/completions")
@router.post("/api/v1/chat/completions")
async def chat_completions(
    request: ChatRequest, app_secret: str = Depends(verify_app_secret)
):
    logger.info("Entering chat_completions route")
    logger.info(f"Processing chat completion request for model: {request.model}")

    if request.model not in [model["id"] for model in ALLOWED_MODELS]:
        raise HTTPException(
            status_code=400,
            detail=f"Model {request.model} is not allowed. Allowed models are: {', '.join(model['id'] for model in ALLOWED_MODELS)}",
        )

    if request.stream:
        logger.info("Streaming response")
        return StreamingResponse(process_streaming_response(request), media_type="text/event-stream")
    else:
        logger.info("Non-streaming response")
        return await process_non_streaming_response(request)

@router.route('/')
@router.route('/healthz')
@router.route('/ready')
@router.route('/alive')
@router.route('/status')
@router.get("/health")
def health_check(request: Request):
    return Response(content=json.dumps({"status": "ok"}), media_type="application/json")
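When request.stream is true the route returns a text/event-stream of "data: ..." lines ending with "data: [DONE]". A hedged client sketch follows; the secret and model id are placeholders, and the model must be one of the ALLOWED_MODELS ids.

import httpx

payload = {
    "model": "placeholder-model",                        # must be an ALLOWED_MODELS id
    "messages": [{"role": "user", "content": "Say hello"}],
    "stream": True,
}
with httpx.stream(
    "POST",
    "http://localhost:8001/v1/chat/completions",
    headers={"Authorization": "Bearer my-app-secret"},   # placeholder secret
    json=payload,
    timeout=None,
) as resp:
    for line in resp.iter_lines():
        if line:
            print(line)  # "data: {...chat.completion.chunk...}", then "data: [DONE]"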
api/utils.py
CHANGED
@@ -1,202 +1,198 @@

from datetime import datetime
import json
from typing import Any, Dict, Optional

import httpx
from api.config import (
    MODEL_MAPPING,
    headers,
    AGENT_MODE,
    TRENDING_AGENT_MODE,
    BASE_URL,
    MODEL_PREFIXES,
    MODEL_REFERERS
)
from fastapi import HTTPException
from api.models import ChatRequest

from api.logger import setup_logger

import uuid  # Added import for uuid

logger = setup_logger(__name__)

def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [
            {
                "index": 0,
                "delta": {"content": content, "role": "assistant"},
                "finish_reason": finish_reason,
            }
        ],
        "usage": None,
    }

def message_to_dict(message, model_prefix: Optional[str] = None):
    if isinstance(message.content, str):
        content = message.content
        if model_prefix:
            content = f"{model_prefix} {content}"
        return {"role": message.role, "content": content}
    elif isinstance(message.content, list) and len(message.content) == 2:
        content = message.content[0]["text"]
        if model_prefix:
            content = f"{model_prefix} {content}"
        return {
            "role": message.role,
            "content": content,
            "data": {
                "imageBase64": message.content[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    else:
        return {"role": message.role, "content": message.content}

def strip_model_prefix(content: str, model_prefix: str) -> str:
    """Remove the model prefix from the response content if present."""
    if content.startswith(model_prefix):
        return content[len(model_prefix):].strip()
    return content

async def process_streaming_response(request: ChatRequest):
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
    referer_url = f"{BASE_URL}{referer_path}"

    # Update headers with dynamic Referer
    dynamic_headers = headers.copy()
    dynamic_headers['Referer'] = referer_url

    json_data = {
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=dynamic_headers,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        content = line
                        if content.startswith("$@$v=undefined-rv1$@$"):
                            content = content[21:]
                        # Strip the model prefix from the response content
                        cleaned_content = strip_model_prefix(content, model_prefix)
                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"

                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))

async def process_non_streaming_response(request: ChatRequest):
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
    referer_url = f"{BASE_URL}{referer_path}"

    # Update headers with dynamic Referer
    dynamic_headers = headers.copy()
    dynamic_headers['Referer'] = referer_url

    json_data = {
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }
    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST", url=f"{BASE_URL}/api/chat", headers=dynamic_headers, json=json_data
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
    if full_response.startswith("$@$v=undefined-rv1$@$"):
        full_response = full_response[21:]

    # Strip the model prefix from the full response
    cleaned_full_response = strip_model_prefix(full_response, model_prefix)

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }
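Each streamed line wraps the upstream text in an OpenAI-style chat.completion.chunk produced by create_chat_completion_data. A small sketch of the emitted shape (the model id is a placeholder):

import json
import time

from api.utils import create_chat_completion_data

chunk = create_chat_completion_data("Hello", "placeholder-model", int(time.time()))
print("data: " + json.dumps(chunk))
# {"id": "chatcmpl-<uuid>", "object": "chat.completion.chunk",
#  "choices": [{"index": 0, "delta": {"content": "Hello", "role": "assistant"},
#               "finish_reason": null}], "usage": null, ...}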
main.py
CHANGED
@@ -1,7 +1,5 @@

import uvicorn
from api.app import app

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8001)
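main.py starts a single uvicorn process on port 8001. If more concurrency is needed, uvicorn can also manage several worker processes itself when given the app as an import string; a hedged sketch (the worker count is an arbitrary example, and the import path assumes the api package layout above):

import uvicorn

if __name__ == "__main__":
    # workers > 1 requires the application as an import string rather than an object.
    uvicorn.run("api.app:app", host="0.0.0.0", port=8001, workers=2)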
requirements.txt
CHANGED
@@ -1,7 +1,6 @@
 fastapi==0.95.2
 httpx==0.23.3
 pydantic==1.10.4
 python-dotenv==0.21.0
 uvicorn==0.21.1
 gunicorn==20.1.0
-aiohttp==3.8.4