Niansuh committed on
Commit
0c6aac2
·
verified ·
1 Parent(s): fc025a6

Upload 13 files

Browse files
Files changed (5) hide show
  1. api/__pycache__/dummy.txt +1 -0
  2. api/config.py +153 -157
  3. api/models.py +1 -1
  4. api/routes.py +59 -61
  5. api/utils.py +198 -200
api/__pycache__/dummy.txt ADDED
@@ -0,0 +1 @@
 
 
1
+
api/config.py CHANGED
@@ -1,157 +1,153 @@
1
- # api/config.py
2
-
3
- import os
4
- from dotenv import load_dotenv
5
-
6
- load_dotenv()
7
-
8
- BASE_URL = "https://www.blackbox.ai"
9
- headers = {
10
- 'accept': '*/*',
11
- 'accept-language': 'en-US,en;q=0.9',
12
- 'origin': 'https://www.blackbox.ai',
13
- 'priority': 'u=1, i',
14
- 'sec-ch-ua': '"Google Chrome";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
15
- 'sec-ch-ua-mobile': '?0',
16
- 'sec-ch-ua-platform': '"Windows"',
17
- 'sec-fetch-dest': 'empty',
18
- 'sec-fetch-mode': 'cors',
19
- 'sec-fetch-site': 'same-origin',
20
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
21
- 'AppleWebKit/537.36 (KHTML, like Gecko) '
22
- 'Chrome/129.0.0.0 Safari/537.36',
23
- }
24
- APP_SECRET = os.getenv("APP_SECRET")
25
-
26
- ALLOWED_MODELS = [
27
- {"id": "blackboxai", "name": "blackboxai"},
28
- {"id": "blackboxai-pro", "name": "blackboxai-pro"},
29
- {"id": "flux", "name": "flux"},
30
- {"id": "llama-3.1-8b", "name": "llama-3.1-8b"},
31
- {"id": "llama-3.1-70b", "name": "llama-3.1-70b"},
32
- {"id": "llama-3.1-405b", "name": "llama-3.1-405b"},
33
- {"id": "gpt-4o", "name": "gpt-4o"},
34
- {"id": "gemini-pro", "name": "gemini-pro"},
35
- {"id": "gemini-1.5-flash", "name": "gemini-1.5-flash"},
36
- {"id": "claude-sonnet-3.5", "name": "claude-sonnet-3.5"},
37
- {"id": "PythonAgent", "name": "PythonAgent"},
38
- {"id": "JavaAgent", "name": "JavaAgent"},
39
- {"id": "JavaScriptAgent", "name": "JavaScriptAgent"},
40
- {"id": "HTMLAgent", "name": "HTMLAgent"},
41
- {"id": "GoogleCloudAgent", "name": "GoogleCloudAgent"},
42
- {"id": "AndroidDeveloper", "name": "AndroidDeveloper"},
43
- {"id": "SwiftDeveloper", "name": "SwiftDeveloper"},
44
- {"id": "Next.jsAgent", "name": "Next.jsAgent"},
45
- {"id": "MongoDBAgent", "name": "MongoDBAgent"},
46
- {"id": "PyTorchAgent", "name": "PyTorchAgent"},
47
- {"id": "ReactAgent", "name": "ReactAgent"},
48
- {"id": "XcodeAgent", "name": "XcodeAgent"},
49
- {"id": "AngularJSAgent", "name": "AngularJSAgent"},
50
- {"id": "RepoMap", "name": "RepoMap"},
51
- {"id": "gemini-1.5-pro-latest", "name": "gemini-pro"},
52
- {"id": "gemini-1.5-pro", "name": "gemini-1.5-pro"},
53
- {"id": "claude-3-5-sonnet-20240620", "name": "claude-sonnet-3.5"},
54
- {"id": "claude-3-5-sonnet", "name": "claude-sonnet-3.5"},
55
- ]
56
-
57
- MODEL_MAPPING = {
58
- "blackboxai": "blackboxai",
59
- "blackboxai-pro": "blackboxai-pro",
60
- "flux": "flux",
61
- "ImageGeneration": "flux",
62
- "llama-3.1-8b": "llama-3.1-8b",
63
- "llama-3.1-70b": "llama-3.1-70b",
64
- "llama-3.1-405b": "llama-3.1-405b",
65
- "gpt-4o": "gpt-4o",
66
- "gemini-pro": "gemini-pro",
67
- "gemini-1.5-flash": "gemini-1.5-flash",
68
- "claude-sonnet-3.5": "claude-sonnet-3.5",
69
- "PythonAgent": "PythonAgent",
70
- "JavaAgent": "JavaAgent",
71
- "JavaScriptAgent": "JavaScriptAgent",
72
- "HTMLAgent": "HTMLAgent",
73
- "GoogleCloudAgent": "GoogleCloudAgent",
74
- "AndroidDeveloper": "AndroidDeveloper",
75
- "SwiftDeveloper": "SwiftDeveloper",
76
- "Next.jsAgent": "Next.jsAgent",
77
- "MongoDBAgent": "MongoDBAgent",
78
- "PyTorchAgent": "PyTorchAgent",
79
- "ReactAgent": "ReactAgent",
80
- "XcodeAgent": "XcodeAgent",
81
- "AngularJSAgent": "AngularJSAgent",
82
- "RepoMap": "RepoMap",
83
- # Additional mappings
84
- "gemini-flash": "gemini-1.5-flash",
85
- "claude-3.5-sonnet": "claude-sonnet-3.5",
86
- "flux": "flux",
87
- "gemini-1.5-pro-latest": "gemini-pro",
88
- "gemini-1.5-pro": "gemini-1.5-pro",
89
- "claude-3-5-sonnet-20240620": "claude-sonnet-3.5",
90
- "claude-3-5-sonnet": "claude-sonnet-3.5",
91
- }
92
-
93
- # Agent modes
94
- AGENT_MODE = {
95
- 'flux': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "flux"},
96
- }
97
-
98
- TRENDING_AGENT_MODE = {
99
- "blackboxai": {},
100
- "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
101
- "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
102
- 'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
103
- 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
104
- 'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
105
- 'PythonAgent': {'mode': True, 'id': "Python Agent"},
106
- 'JavaAgent': {'mode': True, 'id': "Java Agent"},
107
- 'JavaScriptAgent': {'mode': True, 'id': "JavaScript Agent"},
108
- 'HTMLAgent': {'mode': True, 'id': "HTML Agent"},
109
- 'GoogleCloudAgent': {'mode': True, 'id': "Google Cloud Agent"},
110
- 'AndroidDeveloper': {'mode': True, 'id': "Android Developer"},
111
- 'SwiftDeveloper': {'mode': True, 'id': "Swift Developer"},
112
- 'Next.jsAgent': {'mode': True, 'id': "Next.js Agent"},
113
- 'MongoDBAgent': {'mode': True, 'id': "MongoDB Agent"},
114
- 'PyTorchAgent': {'mode': True, 'id': "PyTorch Agent"},
115
- 'ReactAgent': {'mode': True, 'id': "React Agent"},
116
- 'XcodeAgent': {'mode': True, 'id': "Xcode Agent"},
117
- 'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"},
118
- 'RepoMap': {'mode': True, 'id': "repomap"},
119
- }
120
-
121
- # Model prefixes
122
- MODEL_PREFIXES = {
123
- "blackboxai": "@blackboxai",
124
- "blackboxai-pro": "@blackboxai-pro",
125
- "flux": "@flux",
126
- "llama-3.1-8b": "@llama-3.1-8b",
127
- "llama-3.1-70b": "@llama-3.1-70b",
128
- "llama-3.1-405b": "@llama-3.1-405b",
129
- "gpt-4o": "@gpt-4o",
130
- "gemini-pro": "@gemini-pro",
131
- "gemini-1.5-flash": "@gemini-1.5-flash",
132
- "claude-sonnet-3.5": "@claude-sonnet-3.5",
133
- "PythonAgent": "@PythonAgent",
134
- "JavaAgent": "@JavaAgent",
135
- "JavaScriptAgent": "@JavaScriptAgent",
136
- "HTMLAgent": "@HTMLAgent",
137
- "GoogleCloudAgent": "@GoogleCloudAgent",
138
- "AndroidDeveloper": "@AndroidDeveloper",
139
- "SwiftDeveloper": "@SwiftDeveloper",
140
- "Next.jsAgent": "@Next.jsAgent",
141
- "MongoDBAgent": "@MongoDBAgent",
142
- "PyTorchAgent": "@PyTorchAgent",
143
- "ReactAgent": "@ReactAgent",
144
- "XcodeAgent": "@XcodeAgent",
145
- "AngularJSAgent": "@AngularJSAgent",
146
- "RepoMap": "@RepoMap",
147
- # Add any additional prefixes if necessary
148
- }
149
-
150
- # Model referers
151
- MODEL_REFERERS = {
152
- "blackboxai": "/?model=blackboxai",
153
- "gpt-4o": "/?model=gpt-4o",
154
- "gemini-pro": "/?model=gemini-pro",
155
- "claude-sonnet-3.5": "/?model=claude-sonnet-3.5"
156
- # Add any additional referers if necessary
157
- }
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+
4
+ load_dotenv()
5
+
6
+ BASE_URL = "https://www.blackbox.ai"
7
+ headers = {
8
+ 'accept': '*/*',
9
+ 'accept-language': 'en-US,en;q=0.9',
10
+ 'origin': 'https://www.blackbox.ai',
11
+ 'priority': 'u=1, i',
12
+ 'sec-ch-ua': '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"',
13
+ 'sec-ch-ua-mobile': '?0',
14
+ 'sec-ch-ua-platform': '"Windows"',
15
+ 'sec-fetch-dest': 'empty',
16
+ 'sec-fetch-mode': 'cors',
17
+ 'sec-fetch-site': 'same-origin',
18
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
19
+ 'AppleWebKit/537.36 (KHTML, like Gecko) '
20
+ 'Chrome/130.0.0.0 Safari/537.36',
21
+ }
22
+ APP_SECRET = os.getenv("APP_SECRET")
23
+
24
+ ALLOWED_MODELS = [
25
+ {"id": "blackboxai", "name": "blackboxai"},
26
+ {"id": "blackboxai-pro", "name": "blackboxai-pro"},
27
+ {"id": "flux", "name": "flux"},
28
+ {"id": "llama-3.1-8b", "name": "llama-3.1-8b"},
29
+ {"id": "llama-3.1-70b", "name": "llama-3.1-70b"},
30
+ {"id": "llama-3.1-405b", "name": "llama-3.1-405b"},
31
+ {"id": "gpt-4o", "name": "gpt-4o"},
32
+ {"id": "gemini-pro", "name": "gemini-pro"},
33
+ {"id": "gemini-1.5-flash", "name": "gemini-1.5-flash"},
34
+ {"id": "claude-sonnet-3.5", "name": "claude-sonnet-3.5"},
35
+ {"id": "PythonAgent", "name": "PythonAgent"},
36
+ {"id": "JavaAgent", "name": "JavaAgent"},
37
+ {"id": "JavaScriptAgent", "name": "JavaScriptAgent"},
38
+ {"id": "HTMLAgent", "name": "HTMLAgent"},
39
+ {"id": "GoogleCloudAgent", "name": "GoogleCloudAgent"},
40
+ {"id": "AndroidDeveloper", "name": "AndroidDeveloper"},
41
+ {"id": "SwiftDeveloper", "name": "SwiftDeveloper"},
42
+ {"id": "Next.jsAgent", "name": "Next.jsAgent"},
43
+ {"id": "MongoDBAgent", "name": "MongoDBAgent"},
44
+ {"id": "PyTorchAgent", "name": "PyTorchAgent"},
45
+ {"id": "ReactAgent", "name": "ReactAgent"},
46
+ {"id": "XcodeAgent", "name": "XcodeAgent"},
47
+ {"id": "AngularJSAgent", "name": "AngularJSAgent"},
48
+ {"id": "RepoMap", "name": "RepoMap"},
49
+ {"id": "gemini-1.5-pro-latest", "name": "gemini-pro"},
50
+ {"id": "gemini-1.5-pro", "name": "gemini-1.5-pro"},
51
+ {"id": "claude-3-5-sonnet-20240620", "name": "claude-sonnet-3.5"},
52
+ {"id": "claude-3-5-sonnet", "name": "claude-sonnet-3.5"},
53
+ {"id": "Niansuh", "name": "Niansuh"},
54
+ ]
55
+
56
+ MODEL_MAPPING = {
57
+ "blackboxai": "blackboxai",
58
+ "blackboxai-pro": "blackboxai-pro",
59
+ "flux": "flux",
60
+ "ImageGeneration": "flux",
61
+ "llama-3.1-8b": "llama-3.1-8b",
62
+ "llama-3.1-70b": "llama-3.1-70b",
63
+ "llama-3.1-405b": "llama-3.1-405b",
64
+ "gpt-4o": "gpt-4o",
65
+ "gemini-pro": "gemini-pro",
66
+ "gemini-1.5-flash": "gemini-1.5-flash",
67
+ "claude-sonnet-3.5": "claude-sonnet-3.5",
68
+ "PythonAgent": "PythonAgent",
69
+ "JavaAgent": "JavaAgent",
70
+ "JavaScriptAgent": "JavaScriptAgent",
71
+ "HTMLAgent": "HTMLAgent",
72
+ "GoogleCloudAgent": "GoogleCloudAgent",
73
+ "AndroidDeveloper": "AndroidDeveloper",
74
+ "SwiftDeveloper": "SwiftDeveloper",
75
+ "Next.jsAgent": "Next.jsAgent",
76
+ "MongoDBAgent": "MongoDBAgent",
77
+ "PyTorchAgent": "PyTorchAgent",
78
+ "ReactAgent": "ReactAgent",
79
+ "XcodeAgent": "XcodeAgent",
80
+ "AngularJSAgent": "AngularJSAgent",
81
+ "RepoMap": "RepoMap",
82
+ # Additional mappings
83
+ "gemini-flash": "gemini-1.5-flash",
84
+ "claude-3.5-sonnet": "claude-sonnet-3.5",
85
+ "flux": "flux",
86
+ "gemini-1.5-pro-latest": "gemini-pro",
87
+ "gemini-1.5-pro": "gemini-1.5-pro",
88
+ "claude-3-5-sonnet-20240620": "claude-sonnet-3.5",
89
+ "claude-3-5-sonnet": "claude-sonnet-3.5",
90
+ "Niansuh": "Niansuh",
91
+ }
92
+
93
+ # Agent modes
94
+ AGENT_MODE = {
95
+ 'flux': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "flux"},
96
+ 'Niansuh': {'mode': True, 'id': "NiansuhAIk1HgESy", 'name': "Niansuh"},
97
+
98
+ }
99
+
100
+ TRENDING_AGENT_MODE = {
101
+ "blackboxai": {},
102
+ "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
103
+ "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
104
+ 'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
105
+ 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
106
+ 'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
107
+ 'PythonAgent': {'mode': True, 'id': "Python Agent"},
108
+ 'JavaAgent': {'mode': True, 'id': "Java Agent"},
109
+ 'JavaScriptAgent': {'mode': True, 'id': "JavaScript Agent"},
110
+ 'HTMLAgent': {'mode': True, 'id': "HTML Agent"},
111
+ 'GoogleCloudAgent': {'mode': True, 'id': "Google Cloud Agent"},
112
+ 'AndroidDeveloper': {'mode': True, 'id': "Android Developer"},
113
+ 'SwiftDeveloper': {'mode': True, 'id': "Swift Developer"},
114
+ 'Next.jsAgent': {'mode': True, 'id': "Next.js Agent"},
115
+ 'MongoDBAgent': {'mode': True, 'id': "MongoDB Agent"},
116
+ 'PyTorchAgent': {'mode': True, 'id': "PyTorch Agent"},
117
+ 'ReactAgent': {'mode': True, 'id': "React Agent"},
118
+ 'XcodeAgent': {'mode': True, 'id': "Xcode Agent"},
119
+ 'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"},
120
+ 'RepoMap': {'mode': True, 'id': "repomap"},
121
+ }
122
+
123
+ # Model prefixes
124
+ MODEL_PREFIXES = {
125
+ 'gpt-4o': '@GPT-4o',
126
+ 'gemini-pro': '@Gemini-PRO',
127
+ 'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
128
+ 'PythonAgent': '@Python Agent',
129
+ 'JavaAgent': '@Java Agent',
130
+ 'JavaScriptAgent': '@JavaScript Agent',
131
+ 'HTMLAgent': '@HTML Agent',
132
+ 'GoogleCloudAgent': '@Google Cloud Agent',
133
+ 'AndroidDeveloper': '@Android Developer',
134
+ 'SwiftDeveloper': '@Swift Developer',
135
+ 'Next.jsAgent': '@Next.js Agent',
136
+ 'MongoDBAgent': '@MongoDB Agent',
137
+ 'PyTorchAgent': '@PyTorch Agent',
138
+ 'ReactAgent': '@React Agent',
139
+ 'XcodeAgent': '@Xcode Agent',
140
+ 'AngularJSAgent': '@AngularJS Agent',
141
+ 'blackboxai-pro': '@BLACKBOXAI-PRO',
142
+ 'flux': '@Image Generation',
143
+ # Add any additional prefixes if necessary
144
+ }
145
+
146
+ # Model referers
147
+ MODEL_REFERERS = {
148
+ "blackboxai": "/?model=blackboxai",
149
+ "gpt-4o": "/?model=gpt-4o",
150
+ "gemini-pro": "/?model=gemini-pro",
151
+ "claude-sonnet-3.5": "/?model=claude-sonnet-3.5"
152
+ # Add any additional referers if necessary
153
+ }
 
 
 
 
api/models.py CHANGED
@@ -11,4 +11,4 @@ class ChatRequest(BaseModel):
11
  stream: Optional[bool] = False
12
  temperature: Optional[float] = 0.7
13
  top_p: Optional[float] = 0.9
14
- max_tokens: Optional[int] = 8192
 
11
  stream: Optional[bool] = False
12
  temperature: Optional[float] = 0.7
13
  top_p: Optional[float] = 0.9
14
+ max_tokens: Optional[int] = 99999999
api/routes.py CHANGED
@@ -1,61 +1,59 @@
1
- # api/routes.py
2
-
3
- import json
4
- from fastapi import APIRouter, Depends, HTTPException, Request, Response
5
- from fastapi.responses import StreamingResponse
6
- from api.auth import verify_app_secret
7
- from api.config import ALLOWED_MODELS
8
- from api.models import ChatRequest
9
- from api.utils import process_non_streaming_response, process_streaming_response
10
- from api.logger import setup_logger
11
-
12
- logger = setup_logger(__name__)
13
-
14
- router = APIRouter()
15
-
16
- @router.options("/v1/chat/completions")
17
- @router.options("/api/v1/chat/completions")
18
- async def chat_completions_options():
19
- return Response(
20
- status_code=200,
21
- headers={
22
- "Access-Control-Allow-Origin": "*",
23
- "Access-Control-Allow-Methods": "POST, OPTIONS",
24
- "Access-Control-Allow-Headers": "Content-Type, Authorization",
25
- },
26
- )
27
-
28
- @router.get("/v1/models")
29
- @router.get("/api/v1/models")
30
- async def list_models():
31
- return {"object": "list", "data": ALLOWED_MODELS}
32
-
33
- @router.post("/v1/chat/completions")
34
- @router.post("/api/v1/chat/completions")
35
- async def chat_completions(
36
- request: ChatRequest, app_secret: str = Depends(verify_app_secret)
37
- ):
38
- logger.info("Entering chat_completions route")
39
- logger.info(f"Processing chat completion request for model: {request.model}")
40
-
41
- if request.model not in [model["id"] for model in ALLOWED_MODELS]:
42
- raise HTTPException(
43
- status_code=400,
44
- detail=f"Model {request.model} is not allowed. Allowed models are: {', '.join(model['id'] for model in ALLOWED_MODELS)}",
45
- )
46
-
47
- if request.stream:
48
- logger.info("Streaming response")
49
- return StreamingResponse(process_streaming_response(request), media_type="text/event-stream")
50
- else:
51
- logger.info("Non-streaming response")
52
- return await process_non_streaming_response(request)
53
-
54
- @router.route('/')
55
- @router.route('/healthz')
56
- @router.route('/ready')
57
- @router.route('/alive')
58
- @router.route('/status')
59
- @router.get("/health")
60
- def health_check(request: Request):
61
- return Response(content=json.dumps({"status": "ok"}), media_type="application/json")
 
1
+ import json
2
+ from fastapi import APIRouter, Depends, HTTPException, Request, Response
3
+ from fastapi.responses import StreamingResponse
4
+ from api.auth import verify_app_secret
5
+ from api.config import ALLOWED_MODELS
6
+ from api.models import ChatRequest
7
+ from api.utils import process_non_streaming_response, process_streaming_response
8
+ from api.logger import setup_logger
9
+
10
+ logger = setup_logger(__name__)
11
+
12
+ router = APIRouter()
13
+
14
+ @router.options("/v1/chat/completions")
15
+ @router.options("/api/v1/chat/completions")
16
+ async def chat_completions_options():
17
+ return Response(
18
+ status_code=200,
19
+ headers={
20
+ "Access-Control-Allow-Origin": "*",
21
+ "Access-Control-Allow-Methods": "POST, OPTIONS",
22
+ "Access-Control-Allow-Headers": "Content-Type, Authorization",
23
+ },
24
+ )
25
+
26
+ @router.get("/v1/models")
27
+ @router.get("/api/v1/models")
28
+ async def list_models():
29
+ return {"object": "list", "data": ALLOWED_MODELS}
30
+
31
+ @router.post("/v1/chat/completions")
32
+ @router.post("/api/v1/chat/completions")
33
+ async def chat_completions(
34
+ request: ChatRequest, app_secret: str = Depends(verify_app_secret)
35
+ ):
36
+ logger.info("Entering chat_completions route")
37
+ logger.info(f"Processing chat completion request for model: {request.model}")
38
+
39
+ if request.model not in [model["id"] for model in ALLOWED_MODELS]:
40
+ raise HTTPException(
41
+ status_code=400,
42
+ detail=f"Model {request.model} is not allowed. Allowed models are: {', '.join(model['id'] for model in ALLOWED_MODELS)}",
43
+ )
44
+
45
+ if request.stream:
46
+ logger.info("Streaming response")
47
+ return StreamingResponse(process_streaming_response(request), media_type="text/event-stream")
48
+ else:
49
+ logger.info("Non-streaming response")
50
+ return await process_non_streaming_response(request)
51
+
52
+ @router.route('/')
53
+ @router.route('/healthz')
54
+ @router.route('/ready')
55
+ @router.route('/alive')
56
+ @router.route('/status')
57
+ @router.get("/health")
58
+ def health_check(request: Request):
59
+ return Response(content=json.dumps({"status": "ok"}), media_type="application/json")
 
 
api/utils.py CHANGED
@@ -1,200 +1,198 @@
1
- # api/utils.py
2
-
3
- from datetime import datetime
4
- import json
5
- from typing import Any, Dict, Optional
6
-
7
- import httpx
8
- from api.config import (
9
- MODEL_MAPPING,
10
- headers,
11
- AGENT_MODE,
12
- TRENDING_AGENT_MODE,
13
- BASE_URL,
14
- MODEL_PREFIXES,
15
- MODEL_REFERERS
16
- )
17
- from fastapi import HTTPException
18
- from api.models import ChatRequest
19
-
20
- from api.logger import setup_logger
21
-
22
- import uuid # Added import for uuid
23
-
24
- logger = setup_logger(__name__)
25
-
26
- def create_chat_completion_data(
27
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
28
- ) -> Dict[str, Any]:
29
- return {
30
- "id": f"chatcmpl-{uuid.uuid4()}",
31
- "object": "chat.completion.chunk",
32
- "created": timestamp,
33
- "model": model,
34
- "choices": [
35
- {
36
- "index": 0,
37
- "delta": {"content": content, "role": "assistant"},
38
- "finish_reason": finish_reason,
39
- }
40
- ],
41
- "usage": None,
42
- }
43
-
44
- def message_to_dict(message, model_prefix: Optional[str] = None):
45
- if isinstance(message.content, str):
46
- content = message.content
47
- if model_prefix:
48
- content = f"{model_prefix} {content}"
49
- return {"role": message.role, "content": content}
50
- elif isinstance(message.content, list) and len(message.content) == 2:
51
- content = message.content[0]["text"]
52
- if model_prefix:
53
- content = f"{model_prefix} {content}"
54
- return {
55
- "role": message.role,
56
- "content": content,
57
- "data": {
58
- "imageBase64": message.content[1]["image_url"]["url"],
59
- "fileText": "",
60
- "title": "snapshot",
61
- },
62
- }
63
- else:
64
- return {"role": message.role, "content": message.content}
65
-
66
- def strip_model_prefix(content: str, model_prefix: str) -> str:
67
- """Remove the model prefix from the response content if present."""
68
- if content.startswith(model_prefix):
69
- return content[len(model_prefix):].strip()
70
- return content
71
-
72
- async def process_streaming_response(request: ChatRequest):
73
- agent_mode = AGENT_MODE.get(request.model, {})
74
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
75
- model_prefix = MODEL_PREFIXES.get(request.model, "")
76
- referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
77
- referer_url = f"{BASE_URL}{referer_path}"
78
-
79
- # Update headers with dynamic Referer
80
- dynamic_headers = headers.copy()
81
- dynamic_headers['Referer'] = referer_url
82
-
83
- json_data = {
84
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
85
- "previewToken": None,
86
- "userId": None,
87
- "codeModelMode": True,
88
- "agentMode": agent_mode,
89
- "trendingAgentMode": trending_agent_mode,
90
- "isMicMode": False,
91
- "userSystemPrompt": None,
92
- "maxTokens": request.max_tokens,
93
- "playgroundTopP": request.top_p,
94
- "playgroundTemperature": request.temperature,
95
- "isChromeExt": False,
96
- "githubToken": None,
97
- "clickedAnswer2": False,
98
- "clickedAnswer3": False,
99
- "clickedForceWebSearch": False,
100
- "visitFromDelta": False,
101
- "mobileClient": False,
102
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
103
- }
104
-
105
- async with httpx.AsyncClient() as client:
106
- try:
107
- async with client.stream(
108
- "POST",
109
- f"{BASE_URL}/api/chat",
110
- headers=dynamic_headers,
111
- json=json_data,
112
- timeout=100,
113
- ) as response:
114
- response.raise_for_status()
115
- async for line in response.aiter_lines():
116
- timestamp = int(datetime.now().timestamp())
117
- if line:
118
- content = line
119
- if content.startswith("$@$v=undefined-rv1$@$"):
120
- content = content[21:]
121
- # Strip the model prefix from the response content
122
- cleaned_content = strip_model_prefix(content, model_prefix)
123
- yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
124
-
125
- yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
126
- yield "data: [DONE]\n\n"
127
- except httpx.HTTPStatusError as e:
128
- logger.error(f"HTTP error occurred: {e}")
129
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
130
- except httpx.RequestError as e:
131
- logger.error(f"Error occurred during request: {e}")
132
- raise HTTPException(status_code=500, detail=str(e))
133
-
134
- async def process_non_streaming_response(request: ChatRequest):
135
- agent_mode = AGENT_MODE.get(request.model, {})
136
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
137
- model_prefix = MODEL_PREFIXES.get(request.model, "")
138
- referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
139
- referer_url = f"{BASE_URL}{referer_path}"
140
-
141
- # Update headers with dynamic Referer
142
- dynamic_headers = headers.copy()
143
- dynamic_headers['Referer'] = referer_url
144
-
145
- json_data = {
146
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
147
- "previewToken": None,
148
- "userId": None,
149
- "codeModelMode": True,
150
- "agentMode": agent_mode,
151
- "trendingAgentMode": trending_agent_mode,
152
- "isMicMode": False,
153
- "userSystemPrompt": None,
154
- "maxTokens": request.max_tokens,
155
- "playgroundTopP": request.top_p,
156
- "playgroundTemperature": request.temperature,
157
- "isChromeExt": False,
158
- "githubToken": None,
159
- "clickedAnswer2": False,
160
- "clickedAnswer3": False,
161
- "clickedForceWebSearch": False,
162
- "visitFromDelta": False,
163
- "mobileClient": False,
164
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
165
- }
166
- full_response = ""
167
- async with httpx.AsyncClient() as client:
168
- try:
169
- async with client.stream(
170
- method="POST", url=f"{BASE_URL}/api/chat", headers=dynamic_headers, json=json_data
171
- ) as response:
172
- response.raise_for_status()
173
- async for chunk in response.aiter_text():
174
- full_response += chunk
175
- except httpx.HTTPStatusError as e:
176
- logger.error(f"HTTP error occurred: {e}")
177
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
178
- except httpx.RequestError as e:
179
- logger.error(f"Error occurred during request: {e}")
180
- raise HTTPException(status_code=500, detail=str(e))
181
- if full_response.startswith("$@$v=undefined-rv1$@$"):
182
- full_response = full_response[21:]
183
-
184
- # Strip the model prefix from the full response
185
- cleaned_full_response = strip_model_prefix(full_response, model_prefix)
186
-
187
- return {
188
- "id": f"chatcmpl-{uuid.uuid4()}",
189
- "object": "chat.completion",
190
- "created": int(datetime.now().timestamp()),
191
- "model": request.model,
192
- "choices": [
193
- {
194
- "index": 0,
195
- "message": {"role": "assistant", "content": cleaned_full_response},
196
- "finish_reason": "stop",
197
- }
198
- ],
199
- "usage": None,
200
- }
 
1
+ from datetime import datetime
2
+ import json
3
+ from typing import Any, Dict, Optional
4
+
5
+ import httpx
6
+ from api.config import (
7
+ MODEL_MAPPING,
8
+ headers,
9
+ AGENT_MODE,
10
+ TRENDING_AGENT_MODE,
11
+ BASE_URL,
12
+ MODEL_PREFIXES,
13
+ MODEL_REFERERS
14
+ )
15
+ from fastapi import HTTPException
16
+ from api.models import ChatRequest
17
+
18
+ from api.logger import setup_logger
19
+
20
+ import uuid # Added import for uuid
21
+
22
+ logger = setup_logger(__name__)
23
+
24
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Build one OpenAI-style ``chat.completion.chunk`` payload.

    Args:
        content: Delta text carried by this chunk.
        model: Model id echoed back to the client.
        timestamp: Unix timestamp for the ``created`` field.
        finish_reason: ``None`` for intermediate chunks, e.g. ``'stop'``
            for the terminal chunk.

    Returns:
        A dict shaped like an OpenAI streaming chunk, with a fresh
        ``chatcmpl-<uuid4>`` id and ``usage`` set to ``None``.
    """
    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": None,
    }
41
+
42
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Convert an incoming chat message into the upstream payload dict.

    String content gets the model prefix prepended (when one is set).
    A two-element list is treated as [text part, image part]: the text
    becomes ``content`` and the image URL is attached under ``data``
    (assumes part order text-then-image — matches current callers).
    Anything else is passed through unchanged.
    """
    def _prefixed(text: str) -> str:
        # Prepend the model prefix only when one is configured.
        return f"{model_prefix} {text}" if model_prefix else text

    if isinstance(message.content, str):
        return {"role": message.role, "content": _prefixed(message.content)}

    if isinstance(message.content, list) and len(message.content) == 2:
        return {
            "role": message.role,
            "content": _prefixed(message.content[0]["text"]),
            "data": {
                "imageBase64": message.content[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }

    return {"role": message.role, "content": message.content}
63
+
64
def strip_model_prefix(content: str, model_prefix: str) -> str:
    """Remove the model prefix from the response content if present.

    Only strips when a non-empty prefix matches. Previously an empty
    prefix matched everything (``str.startswith("")`` is always True),
    so models without a configured prefix had every chunk whitespace-
    stripped as a side effect, corrupting streamed formatting.
    """
    if model_prefix and content.startswith(model_prefix):
        return content[len(model_prefix):].strip()
    return content
69
+
70
async def process_streaming_response(request: ChatRequest):
    """Stream a chat completion from the upstream API as SSE frames.

    Yields OpenAI-style ``chat.completion.chunk`` frames for each upstream
    line, then a terminal 'stop' chunk and a ``data: [DONE]`` sentinel.

    Raises:
        HTTPException: with the upstream status code on HTTP errors, or
            500 on transport errors. (NOTE(review): exceptions raised after
            the first chunk has been sent cannot change the HTTP status —
            FastAPI has already started the response.)
    """
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
    referer_url = f"{BASE_URL}{referer_path}"

    # Update headers with a per-model dynamic Referer.
    dynamic_headers = headers.copy()
    dynamic_headers['Referer'] = referer_url

    json_data = {
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }

    # Bind before the loop: if upstream returns zero lines, the terminal
    # 'stop' chunk below would otherwise hit an UnboundLocalError.
    timestamp = int(datetime.now().timestamp())

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=dynamic_headers,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        content = line
                        # Drop the upstream version sentinel (21 chars) when present.
                        if content.startswith("$@$v=undefined-rv1$@$"):
                            content = content[21:]
                        # Strip the model prefix from the response content.
                        cleaned_content = strip_model_prefix(content, model_prefix)
                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"

                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
131
+
132
async def process_non_streaming_response(request: ChatRequest):
    """Fetch a complete chat completion from the upstream API.

    Buffers the whole upstream stream into one string and returns an
    OpenAI-style ``chat.completion`` response dict.

    Raises:
        HTTPException: with the upstream status code on HTTP errors, or
            500 on transport errors.
    """
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
    referer_url = f"{BASE_URL}{referer_path}"

    # Update headers with a per-model dynamic Referer.
    dynamic_headers = headers.copy()
    dynamic_headers['Referer'] = referer_url

    json_data = {
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }
    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=dynamic_headers,
                json=json_data,
                # Match the streaming path's timeout; httpx's 5s default
                # is too short for long completions.
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
    # Drop the upstream version sentinel (21 chars) when present.
    if full_response.startswith("$@$v=undefined-rv1$@$"):
        full_response = full_response[21:]

    # Strip the model prefix from the full response.
    cleaned_full_response = strip_model_prefix(full_response, model_prefix)

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }