Update main.py
main.py
CHANGED
@@ -11,7 +11,7 @@ from collections import defaultdict
 from typing import List, Dict, Any, Optional, Union
 from datetime import datetime
 
-from aiohttp import ClientSession, ClientResponseError
+from aiohttp import ClientSession, ClientTimeout, ClientError, ClientResponseError
 from fastapi import FastAPI, HTTPException, Request, Depends, Header
 from fastapi.responses import JSONResponse
 from pydantic import BaseModel
@@ -27,11 +27,18 @@ logger = logging.getLogger(__name__)
 # Load environment variables
 API_KEYS = os.getenv('API_KEYS', '').split(',') # Comma-separated API keys
 RATE_LIMIT = int(os.getenv('RATE_LIMIT', '60')) # Requests per minute
+AVAILABLE_MODELS = os.getenv('AVAILABLE_MODELS', '') # Comma-separated available models
 
 if not API_KEYS or API_KEYS == ['']:
     logger.error("No API keys found. Please set the API_KEYS environment variable.")
     raise Exception("API_KEYS environment variable not set.")
 
+# Process available models
+if AVAILABLE_MODELS:
+    AVAILABLE_MODELS = [model.strip() for model in AVAILABLE_MODELS.split(',') if model.strip()]
+else:
+    AVAILABLE_MODELS = [] # If empty, all models are available
+
 # Simple in-memory rate limiter based solely on IP addresses
 rate_limit_store = defaultdict(lambda: {"count": 0, "timestamp": time.time()})
 
@@ -49,9 +56,11 @@ class Blackbox:
     supports_message_history = True
 
     default_model = 'blackboxai'
+    image_models = ['ImageGeneration']
     models = [
         default_model,
         'blackboxai-pro',
+        *image_models,
         "llama-3.1-8b",
         'llama-3.1-70b',
         'llama-3.1-405b',
@@ -59,9 +68,25 @@ class Blackbox:
         'gemini-pro',
         'gemini-1.5-flash',
         'claude-sonnet-3.5',
+        'PythonAgent',
+        'JavaAgent',
+        'JavaScriptAgent',
+        'HTMLAgent',
+        'GoogleCloudAgent',
+        'AndroidDeveloper',
+        'SwiftDeveloper',
+        'Next.jsAgent',
+        'MongoDBAgent',
+        'PyTorchAgent',
+        'ReactAgent',
+        'XcodeAgent',
+        'AngularJSAgent',
     ]
 
-    agentMode = {}
+    agentMode = {
+        'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
+    }
+
     trendingAgentMode = {
         "blackboxai": {},
         "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
@@ -69,6 +94,19 @@ class Blackbox:
         'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
         'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
         'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
+        'PythonAgent': {'mode': True, 'id': "Python Agent"},
+        'JavaAgent': {'mode': True, 'id': "Java Agent"},
+        'JavaScriptAgent': {'mode': True, 'id': "JavaScript Agent"},
+        'HTMLAgent': {'mode': True, 'id': "HTML Agent"},
+        'GoogleCloudAgent': {'mode': True, 'id': "Google Cloud Agent"},
+        'AndroidDeveloper': {'mode': True, 'id': "Android Developer"},
+        'SwiftDeveloper': {'mode': True, 'id': "Swift Developer"},
+        'Next.jsAgent': {'mode': True, 'id': "Next.js Agent"},
+        'MongoDBAgent': {'mode': True, 'id': "MongoDB Agent"},
+        'PyTorchAgent': {'mode': True, 'id': "PyTorch Agent"},
+        'ReactAgent': {'mode': True, 'id': "React Agent"},
+        'XcodeAgent': {'mode': True, 'id': "Xcode Agent"},
+        'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"},
     }
 
     userSelectedModel = {
@@ -77,23 +115,12 @@ class Blackbox:
         'claude-sonnet-3.5': "claude-sonnet-3.5",
     }
 
-    model_prefixes = {
-        'gpt-4o': '@GPT-4o',
-        'gemini-pro': '@Gemini-PRO',
-        'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
-        'blackboxai-pro': '@BLACKBOXAI-PRO',
-    }
-
-    model_referers = {
-        "blackboxai": "/?model=blackboxai",
-        "gpt-4o": "/?model=gpt-4o",
-        "gemini-pro": "/?model=gemini-pro",
-        "claude-sonnet-3.5": "/?model=claude-sonnet-3.5"
-    }
-
     model_aliases = {
+        "gpt-3.5-turbo": "blackboxai",
+        "gpt-4": "gpt-4o",
         "gemini-flash": "gemini-1.5-flash",
         "claude-3.5-sonnet": "claude-sonnet-3.5",
+        "flux": "ImageGeneration",
     }
 
     @classmethod
@@ -110,31 +137,6 @@ class Blackbox:
         characters = string.ascii_letters + string.digits
         return ''.join(random.choices(characters, k=length))
 
-    @staticmethod
-    def generate_next_action() -> str:
-        return uuid.uuid4().hex
-
-    @staticmethod
-    def generate_next_router_state_tree() -> str:
-        router_state = [
-            "",
-            {
-                "children": [
-                    "(chat)",
-                    {
-                        "children": [
-                            "__PAGE__",
-                            {}
-                        ]
-                    }
-                ]
-            },
-            None,
-            None,
-            True
-        ]
-        return json.dumps(router_state)
-
     @staticmethod
     def clean_response(text: str) -> str:
         pattern = r'^\$\@\$v=undefined-rv1\$\@\$'
@@ -142,61 +144,39 @@ class Blackbox:
         return cleaned_text
 
     @classmethod
-    async def …
+    async def create_completion(
        cls,
        model: str,
        messages: List[Dict[str, str]],
-       proxy: Optional[str] = None,
        **kwargs
    ) -> str:
+        """
+        Creates a completion using the Blackbox AI API.
+        """
        model = cls.get_model(model)
-        …
-        …
-        …
+        if model is None:
+            logger.error(f"Model {model} is not available.")
+            raise ModelNotWorkingException(model)
 
-        …
-        trending_agent_mode = cls.trendingAgentMode.get(model, {})
+        chat_id = cls.generate_random_string()
 
-        prefix = cls.model_prefixes.get(model, "")
-
        formatted_prompt = ""
        for message in messages:
            role = message.get('role', '').capitalize()
            content = message.get('content', '')
            if role and content:
                formatted_prompt += f"{role}: {content}\n"
-
-        if prefix:
-            formatted_prompt = f"{prefix} {formatted_prompt}".strip()
-
-        referer_path = cls.model_referers.get(model, f"/?model={model}")
-        referer_url = f"{cls.url}{referer_path}"
 
-        common_headers = {
+        headers = {
+            'Content-Type': 'application/json',
            'accept': '*/*',
            'accept-language': 'en-US,en;q=0.9',
-            'cache-control': 'no-cache',
            'origin': cls.url,
-            …
-            …
-            'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Linux"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
-                          'AppleWebKit/537.36 (KHTML, like Gecko) '
-                          'Chrome/129.0.0.0 Safari/537.36'
+            'referer': f"{cls.url}/?model={model}",
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36'
        }
 
-        headers_api_chat = {
-            'Content-Type': 'application/json',
-            'Referer': referer_url
-        }
-        headers_api_chat_combined = {**common_headers, **headers_api_chat}
-
-        payload_api_chat = {
+        payload = {
            "messages": [
                {
                    "id": chat_id,
@@ -208,8 +188,8 @@ class Blackbox:
            "previewToken": None,
            "userId": None,
            "codeModelMode": True,
-            "agentMode": …
-            "trendingAgentMode": …
+            "agentMode": cls.agentMode.get(model, {}),
+            "trendingAgentMode": cls.trendingAgentMode.get(model, {}),
            "isMicMode": False,
            "userSystemPrompt": None,
            "maxTokens": 1024,
@@ -226,16 +206,15 @@ class Blackbox:
            "userSelectedModel": cls.userSelectedModel.get(model, model)
        }
 
-        async with ClientSession(…
+        async with ClientSession() as session:
            try:
                async with session.post(
                    cls.api_endpoint,
-                    headers=…
-                    json=…
-                    …
-                    …
-                    …
-                    text = await response_api_chat.text()
+                    headers=headers,
+                    json=payload
+                ) as response:
+                    response.raise_for_status()
+                    text = await response.text()
                    cleaned_response = cls.clean_response(text)
                    return cleaned_response
            except ClientResponseError as e:
@@ -246,9 +225,9 @@ class Blackbox:
                    error_text += f" - {cleaned_error}"
                except Exception:
                    pass
-                …
+                raise HTTPException(status_code=e.status, detail=error_text)
            except Exception as e:
-                …
+                raise HTTPException(status_code=500, detail=f"Unexpected error: {str(e)}")
 
 # Custom exception for model not working
 class ModelNotWorkingException(Exception):
@@ -282,7 +261,7 @@ async def rate_limiter_per_ip(request: Request):
    else:
        if rate_limit_store[client_ip]["count"] >= RATE_LIMIT:
            logger.warning(f"Rate limit exceeded for IP address: {client_ip}")
-            raise HTTPException(status_code=429, detail='Rate limit exceeded…
+            raise HTTPException(status_code=429, detail='Rate limit exceeded')
        rate_limit_store[client_ip]["count"] += 1
 
 async def get_api_key(request: Request, authorization: str = Header(None)) -> str:
@@ -362,14 +341,16 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
        logger.warning(f"Attempt to use unavailable model: {request.model} from IP: {client_ip}")
        raise HTTPException(status_code=400, detail="Requested model is not available.")
 
-    # Process the request with actual message content, but don't log
+    # Process the request with actual message content, but don't log it
+    response_content = await Blackbox.create_completion(
        model=request.model,
-        messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
-        temperature=request.temperature,
-        max_tokens=request.max_tokens
+        messages=[{"role": msg.role, "content": msg.content} for msg in request.messages], # Actual message content used here
    )
 
    logger.info(f"Completed response generation for API key: {api_key} | IP: {client_ip}")
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
@@ -377,12 +358,12 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
        "model": request.model,
        "choices": [
            {
-                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": response_content
                },
-                "finish_reason": "stop"
+                "finish_reason": "stop",
+                "index": 0
            }
        ],
        "usage": {
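For reference, a minimal sketch of how a client could exercise the updated service. It assumes the chat_completions handler is mounted at /v1/chat/completions and that get_api_key expects a Bearer token in the Authorization header; neither the route path nor the header scheme is visible in these hunks, so both are placeholders. The environment values mirror the variables read at startup (API_KEYS, RATE_LIMIT, and the new AVAILABLE_MODELS).

# Example environment, matching the parsing above (all values illustrative):
#   export API_KEYS="key-one,key-two"
#   export RATE_LIMIT="60"
#   export AVAILABLE_MODELS="blackboxai,gpt-4o,gemini-1.5-flash,ImageGeneration"

import requests

BASE_URL = "http://localhost:8000"              # assumed host/port for the FastAPI app
ENDPOINT = f"{BASE_URL}/v1/chat/completions"    # assumed route for chat_completions
API_KEY = "key-one"                             # one of the keys from API_KEYS

response = requests.post(
    ENDPOINT,
    headers={"Authorization": f"Bearer {API_KEY}"},  # assumed header format checked by get_api_key
    json={
        "model": "blackboxai",  # default_model; aliases such as "gpt-4" -> "gpt-4o" are also defined
        "messages": [{"role": "user", "content": "Write a haiku about rate limits."}],
    },
    timeout=60,
)
response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])

With this setup, exceeding RATE_LIMIT requests per minute from one IP is answered with HTTP 429 by rate_limiter_per_ip, and a model filtered out by a non-empty AVAILABLE_MODELS list presumably hits the HTTP 400 "Requested model is not available" branch.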