Niansuh committed on
Commit
711a467
·
verified ·
1 Parent(s): 3021f30

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +252 -173
api/utils.py CHANGED
@@ -1,185 +1,264 @@
1
- # Blackbox.py
2
 
3
- from __future__ import annotations
4
-
5
- from aiohttp import ClientSession
6
- import uuid
7
  import json
8
- import re
9
-
10
- from ..typing import AsyncResult, Messages, ImageType
11
- from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
12
- from ..image import ImageResponse, to_data_uri
13
-
14
- class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
15
- label = "Blackbox AI"
16
- url = "https://www.blackbox.ai"
17
- api_endpoint = "https://www.blackbox.ai/api/chat"
18
- working = True
19
- supports_stream = True
20
- supports_system_message = True
21
- supports_message_history = True
22
-
23
- default_model = 'blackboxai'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
- image_models = ['Image Generation', 'repomap']
26
-
27
- userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro']
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
29
- agentMode = {
30
- 'Image Generation': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
 
 
 
 
 
 
 
 
 
 
 
 
31
  }
32
-
33
- trendingAgentMode = {
34
- "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
35
- "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
36
- 'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
37
- 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405"},
38
- #
39
- 'Python Agent': {'mode': True, 'id': "Python Agent"},
40
- 'Java Agent': {'mode': True, 'id': "Java Agent"},
41
- 'JavaScript Agent': {'mode': True, 'id': "JavaScript Agent"},
42
- 'HTML Agent': {'mode': True, 'id': "HTML Agent"},
43
- 'Google Cloud Agent': {'mode': True, 'id': "Google Cloud Agent"},
44
- 'Android Developer': {'mode': True, 'id': "Android Developer"},
45
- 'Swift Developer': {'mode': True, 'id': "Swift Developer"},
46
- 'Next.js Agent': {'mode': True, 'id': "Next.js Agent"},
47
- 'MongoDB Agent': {'mode': True, 'id': "MongoDB Agent"},
48
- 'PyTorch Agent': {'mode': True, 'id': "PyTorch Agent"},
49
- 'React Agent': {'mode': True, 'id': "React Agent"},
50
- 'Xcode Agent': {'mode': True, 'id': "Xcode Agent"},
51
- 'AngularJS Agent': {'mode': True, 'id': "AngularJS Agent"},
52
- 'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
53
- #
54
- 'repomap': {'mode': True, 'id': "repomap"},
55
- #
56
- 'Heroku Agent': {'mode': True, 'id': "Heroku Agent"},
57
- 'Godot Agent': {'mode': True, 'id': "Godot Agent"},
58
- 'Go Agent': {'mode': True, 'id': "Go Agent"},
59
- 'Gitlab Agent': {'mode': True, 'id': "Gitlab Agent"},
60
- 'Git Agent': {'mode': True, 'id': "Git Agent"},
61
- 'Flask Agent': {'mode': True, 'id': "Flask Agent"},
62
- 'Firebase Agent': {'mode': True, 'id': "Firebase Agent"},
63
- 'FastAPI Agent': {'mode': True, 'id': "FastAPI Agent"},
64
- 'Erlang Agent': {'mode': True, 'id': "Erlang Agent"},
65
- 'Electron Agent': {'mode': True, 'id': "Electron Agent"},
66
- 'Docker Agent': {'mode': True, 'id': "Docker Agent"},
67
- 'DigitalOcean Agent': {'mode': True, 'id': "DigitalOcean Agent"},
68
- 'Bitbucket Agent': {'mode': True, 'id': "Bitbucket Agent"},
69
- 'Azure Agent': {'mode': True, 'id': "Azure Agent"},
70
- 'Flutter Agent': {'mode': True, 'id': "Flutter Agent"},
71
- 'Youtube Agent': {'mode': True, 'id': "Youtube Agent"},
72
- 'builder Agent': {'mode': True, 'id': "builder Agent"},
73
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
 
75
- models = [default_model, *userSelectedModel, *list(agentMode.keys()), *list(trendingAgentMode.keys())]
76
-
77
- model_aliases = {
78
- "gemini-flash": "gemini-1.5-flash",
79
- "claude-3.5-sonnet": "claude-sonnet-3.5",
80
- "flux": "Image Generation",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  }
82
 
83
- @staticmethod
84
- def generate_id() -> str:
85
- """Generate a UUID-based message ID."""
86
- return str(uuid.uuid4())
87
-
88
- @classmethod
89
- def get_model(cls, model: str) -> str:
90
- if model in cls.models:
91
- return model
92
- elif model in cls.model_aliases:
93
- return cls.model_aliases[model]
94
- else:
95
- return cls.default_model
96
-
97
- @classmethod
98
- async def create_async_generator(
99
- cls,
100
- model: str,
101
- messages: Messages,
102
- proxy: str = None,
103
- web_search: bool = False,
104
- image: ImageType = None,
105
- image_name: str = None,
106
- **kwargs
107
- ) -> AsyncResult:
108
- model = cls.get_model(model)
109
- message_id = cls.generate_id()
110
- # Removed add_prefix_to_messages since prefixes are no longer used
111
- messages_processed = messages.copy()
112
-
113
- if image is not None:
114
- messages_processed[-1]['data'] = {
115
- 'fileText': '',
116
- 'imageBase64': to_data_uri(image),
117
- 'title': image_name
118
- }
119
 
120
- headers = {
121
- 'Accept': '*/*',
122
- 'Accept-Language': 'en-US,en;q=0.9',
123
- 'Cache-Control': 'no-cache',
124
- 'Content-Type': 'application/json',
125
- 'Origin': cls.url,
126
- 'Pragma': 'no-cache',
127
- 'Priority': 'u=1, i',
128
- 'Referer': f'{cls.url}/',
129
- 'User-Agent': (
130
- 'Mozilla/5.0 (X11; Linux x86_64) '
131
- 'AppleWebKit/537.36 (KHTML, like Gecko) '
132
- 'Chrome/130.0.0.0 Safari/537.36'
133
- )
134
- }
135
-
136
- data = {
137
- "messages": messages_processed,
138
- "id": message_id,
139
- "previewToken": None,
140
- "userId": None,
141
- "codeModelMode": True,
142
- "agentMode": cls.agentMode.get(model, {}) if model in cls.agentMode else {},
143
- "trendingAgentMode": cls.trendingAgentMode.get(model, {}) if model in cls.trendingAgentMode else {},
144
- "isMicMode": False,
145
- "userSystemPrompt": None,
146
- "maxTokens": 1024,
147
- "playgroundTopP": 0.9,
148
- "playgroundTemperature": 0.5,
149
- "isChromeExt": False,
150
- "githubToken": None,
151
- "clickedAnswer2": False,
152
- "clickedAnswer3": False,
153
- "clickedForceWebSearch": False,
154
- "visitFromDelta": False,
155
- "mobileClient": False,
156
- "userSelectedModel": model if model in cls.userSelectedModel else None,
157
- "webSearchMode": web_search,
158
- "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc"
159
- }
160
 
161
- async with ClientSession(headers=headers) as session:
162
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
 
 
 
 
163
  response.raise_for_status()
164
- response_text = await response.text()
165
-
166
- if model in cls.image_models:
167
- image_matches = re.findall(r'!\[.*?\]\((https?://[^\)]+)\)', response_text)
168
- if image_matches:
169
- image_url = image_matches[0]
170
- image_response = ImageResponse(images=[image_url], alt="Generated Image")
171
- yield image_response
172
- return
173
-
174
- json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', response_text, re.DOTALL)
175
- if json_match:
176
- search_results = json.loads(json_match.group(1))
177
- answer = response_text.split('$~~~$')[-1].strip()
178
-
179
- formatted_response = f"{answer}\n\n**Source:**"
180
- for i, result in enumerate(search_results, 1):
181
- formatted_response += f"\n{i}. {result['title']}: {result['link']}"
182
-
183
- yield formatted_response
184
- else:
185
- yield response_text.strip()
 
 
 
 
 
 
 
1
+ # utils.py
2
 
3
+ from datetime import datetime
 
 
 
4
  import json
5
+ import uuid
6
+ import asyncio
7
+ import random
8
+ import string
9
+ from typing import Any, Dict, Optional
10
+
11
+ import httpx
12
+ from fastapi import HTTPException
13
+ from api.config import (
14
+ MODEL_MAPPING,
15
+ BASE_URL,
16
+ AGENT_MODE,
17
+ TRENDING_AGENT_MODE,
18
+ )
19
+ from api.models import ChatRequest
20
+ from api.logger import setup_logger
21
+
22
+ logger = setup_logger(__name__)
23
+
24
# Helper function to create a UUID-based chat ID
def generate_chat_id() -> str:
    """Return a fresh UUID4 string used as a per-request chat identifier."""
    return f"{uuid.uuid4()}"
27
+
28
# Helper function to create chat completion data
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Build one OpenAI-style `chat.completion.chunk` payload for SSE streaming.

    Args:
        content: Delta text for this chunk (may be empty for the final chunk).
        model: Model name echoed back to the client.
        timestamp: Unix timestamp placed in the `created` field.
        finish_reason: None for intermediate chunks, e.g. "stop" for the last one.
    """
    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": None,
    }
46
+
47
# Function to convert message to dictionary format, ensuring base64 data
def message_to_dict(message):
    """Convert a request message into the upstream payload dict.

    String content passes through unchanged; list content takes the first
    item's "text" field (empty string otherwise). A two-element list whose
    second entry carries an "image_url" is mapped to the shape the upstream
    API expects, always embedding the image as base64 data.
    """
    raw = message.content
    if isinstance(raw, str):
        text = raw
    elif isinstance(raw, list) and raw:
        text = raw[0].get("text", "")
    else:
        text = ""

    has_image = isinstance(raw, list) and len(raw) == 2 and "image_url" in raw[1]
    if has_image:
        # Ensure base64 images are always included for all models
        return {
            "role": message.role,
            "content": text,
            "data": {
                "imageBase64": raw[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    return {"role": message.role, "content": text}
68
+
69
# Function to strip model prefix from content if present
def strip_model_prefix(content: str) -> str:
    """Return *content* unchanged.

    Kept for API compatibility: model prefixes were removed upstream, so
    there is nothing left to strip.
    """
    return content
74
+
75
# Function to get the correct referer URL for logging
def get_referer_url(chat_id: str, model: str) -> str:
    """Return the referer URL used for request headers and logging.

    MODEL_REFERERS was removed, so every model now resolves to BASE_URL;
    both parameters are kept so existing call sites keep working.
    """
    return BASE_URL
80
+
81
# Process streaming response without model prefixes and referers
async def process_streaming_response(request: ChatRequest):
    """Stream a chat completion from the upstream API as SSE chunks.

    Yields OpenAI-style `chat.completion.chunk` SSE lines for each upstream
    line, then a final chunk with finish_reason="stop" and a `[DONE]` sentinel.

    Raises:
        HTTPException: with the upstream status code on HTTP errors, or 500
            on transport-level request errors.
    """
    chat_id = generate_chat_id()
    referer_url = get_referer_url(chat_id, request.model)
    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})

    headers_api_chat = {
        'Accept': '*/*',
        'Accept-Language': 'en-US,en;q=0.9',
        'Cache-Control': 'no-cache',
        'Origin': BASE_URL,
        'Pragma': 'no-cache',
        'Priority': 'u=1, i',
        'User-Agent': (
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
            'AppleWebKit/537.36 (KHTML, like Gecko) '
            'Chrome/130.0.0.0 Safari/537.36'
        ),
        'Content-Type': 'application/json',
        'Referer': referer_url,  # Retain referer if necessary
    }

    if request.model == 'o1-preview':
        delay_seconds = random.randint(1, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
        await asyncio.sleep(delay_seconds)

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        "id": chat_id,
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [message_to_dict(msg) for msg in request.messages],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
        "visitFromDelta": False,
    }

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                # Fallback timestamp: previously `timestamp` was only assigned
                # inside the loop, so an empty upstream stream made the final
                # "stop" chunk raise UnboundLocalError.
                timestamp = int(datetime.now().timestamp())
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        content = line
                        # Upstream sometimes prepends a version marker; drop it.
                        if content.startswith("$@$v=undefined-rv1$@$"):
                            content = content[21:]
                        cleaned_content = strip_model_prefix(content)
                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"

                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))
162
+
163
# Process non-streaming response without model prefixes and referers
async def process_non_streaming_response(request: ChatRequest):
    """Collect the full upstream reply and return one OpenAI-style response.

    Streams the upstream `/api/chat` body to completion, strips the optional
    version marker, and wraps the text in a `chat.completion` dict.

    Raises:
        HTTPException: with the upstream status code on HTTP errors, or 500
            on transport-level request errors.
    """
    chat_id = generate_chat_id()
    referer_url = get_referer_url(chat_id, request.model)
    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})

    headers_api_chat = {
        'Accept': '*/*',
        'Accept-Language': 'en-US,en;q=0.9',
        'Cache-Control': 'no-cache',
        'Origin': BASE_URL,
        'Pragma': 'no-cache',
        'Priority': 'u=1, i',
        'User-Agent': (
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
            'AppleWebKit/537.36 (KHTML, like Gecko) '
            'Chrome/130.0.0.0 Safari/537.36'
        ),
        'Content-Type': 'application/json',
        'Referer': referer_url,  # Retain referer if necessary
    }
    # NOTE(review): a second `headers_chat` dict (with Next-Action /
    # Next-Router-State-Tree headers) was built here but never used by any
    # request; removed as dead code.

    if request.model == 'o1-preview':
        delay_seconds = random.randint(20, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
        await asyncio.sleep(delay_seconds)

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        "id": chat_id,
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [message_to_dict(msg) for msg in request.messages],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
        "visitFromDelta": False,
    }

    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                # Match the streaming path; httpx's 5s default timeout would
                # abort long generations.
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    # Upstream sometimes prepends a version marker; drop it.
    if full_response.startswith("$@$v=undefined-rv1$@$"):
        full_response = full_response[21:]

    cleaned_full_response = strip_model_prefix(full_response)

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }