Niansuh committed on
Commit
3021f30
·
verified ·
1 Parent(s): e460d39

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +173 -246
api/utils.py CHANGED
@@ -1,258 +1,185 @@
1
- from datetime import datetime
2
- import json
3
- import uuid
4
- import asyncio
5
- import random
6
- import string
7
- from typing import Any, Dict, Optional
8
-
9
- import httpx
10
- from fastapi import HTTPException
11
- from api.config import (
12
- MODEL_MAPPING,
13
- BASE_URL,
14
- AGENT_MODE,
15
- TRENDING_AGENT_MODE,
16
- )
17
- from api.models import ChatRequest
18
- from api.logger import setup_logger
19
 
20
- logger = setup_logger(__name__)
21
 
22
# Helper: mint a fresh conversation identifier.
def generate_chat_id() -> str:
    """Return a random UUID4 string used as the chat session ID."""
    return str(uuid.uuid4())
25
-
26
# Helper: build one OpenAI-compatible streaming chunk payload.
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Build a `chat.completion.chunk` dict in the OpenAI wire format.

    Args:
        content: Text delta carried by this chunk.
        model: Model name echoed back to the client.
        timestamp: Unix timestamp (seconds) for the `created` field.
        finish_reason: ``"stop"`` on the terminating chunk, otherwise None.
    """
    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": None,
    }
44
-
45
# Function to convert message to dictionary format, ensuring base64 data
def message_to_dict(message):
    """Convert a chat message object into the upstream Blackbox dict format.

    Text content is taken directly from ``message.content`` when it is a
    string, or from the first list element's ``"text"`` key when it is a
    list.  If any list element carries an ``"image_url"``, its URL
    (expected to hold base64 data) is attached under ``"data"`` so image
    input works for all models.

    Args:
        message: An object with ``role`` and ``content`` attributes
            (e.g. a pydantic message model).

    Returns:
        A dict with ``role``/``content`` keys, plus a ``data`` key when an
        image part is present.
    """
    if isinstance(message.content, str):
        content = message.content
    elif isinstance(message.content, list) and len(message.content) > 0:
        content = message.content[0].get("text", "")
    else:
        content = ""

    if isinstance(message.content, list):
        # Generalized from the old fixed [text, image] pair: accept the
        # image part at any position in the content list.  The original
        # len == 2 / index 1 case behaves identically.
        for part in message.content:
            if isinstance(part, dict) and "image_url" in part:
                return {
                    "role": message.role,
                    "content": content,
                    "data": {
                        "imageBase64": part["image_url"]["url"],
                        "fileText": "",
                        "title": "snapshot",
                    },
                }
    return {"role": message.role, "content": content}
66
-
67
# Function to strip model prefix from content if present
def strip_model_prefix(content: str) -> str:
    """Identity pass-through retained for call-site compatibility.

    Model prefixes were removed from the pipeline, so there is nothing
    left to strip; callers still route content through this hook.
    """
    return content
72
-
73
# Function to get the correct referer URL for logging
def get_referer_url(chat_id: str, model: str) -> str:
    """Return the referer URL used for upstream requests.

    MODEL_REFERERS was removed, so every model now falls back to
    BASE_URL; both parameters are kept only so existing call sites
    remain unchanged.
    """
    return BASE_URL
78
-
79
# Process streaming response without model prefixes and referers
async def process_streaming_response(request: ChatRequest):
    """Stream a chat completion from the upstream Blackbox API.

    Yields Server-Sent-Events ``data:`` lines carrying OpenAI-style chunk
    payloads, followed by a final "stop" chunk and ``data: [DONE]``.

    Args:
        request: The incoming ChatRequest (model, messages, sampling params).

    Raises:
        HTTPException: upstream HTTP errors are re-raised with their status
            code; transport failures are mapped to 500.
    """
    chat_id = generate_chat_id()
    referer_url = get_referer_url(chat_id, request.model)
    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})

    headers_api_chat = {
        'accept': '*/*',
        'accept-language': 'en-US,en;q=0.9',
        'cache-control': 'no-cache',
        'origin': BASE_URL,
        'pragma': 'no-cache',
        'priority': 'u=1, i',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/130.0.0.0 Safari/537.36',
        'Content-Type': 'application/json',
        'Referer': referer_url,  # Retain referer if necessary
    }

    if request.model == 'o1-preview':
        # Artificial delay mimicking the upstream model's long latency.
        delay_seconds = random.randint(1, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
        await asyncio.sleep(delay_seconds)

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        "id": chat_id,
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [message_to_dict(msg) for msg in request.messages],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
        "visitFromDelta": False,
    }

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                # BUGFIX: `timestamp` used to be assigned only inside the
                # loop below, so an empty upstream stream raised NameError
                # at the final "stop" chunk.  Initialize it up front.
                timestamp = int(datetime.now().timestamp())
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        content = line
                        # Strip the upstream sentinel prefix, if present.
                        if content.startswith("$@$v=undefined-rv1$@$"):
                            content = content[21:]
                        cleaned_content = strip_model_prefix(content)
                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"

                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))
158
-
159
# Process non-streaming response without model prefixes and referers
async def process_non_streaming_response(request: ChatRequest):
    """Fetch a complete (non-streaming) chat completion from Blackbox.

    Accumulates the whole upstream stream into one string and returns a
    single OpenAI-style ``chat.completion`` dict.

    Args:
        request: The incoming ChatRequest (model, messages, sampling params).

    Raises:
        HTTPException: upstream HTTP errors are re-raised with their status
            code; transport failures are mapped to 500.
    """
    chat_id = generate_chat_id()
    referer_url = get_referer_url(chat_id, request.model)
    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})

    headers_api_chat = {
        'accept': '*/*',
        'accept-language': 'en-US,en;q=0.9',
        'cache-control': 'no-cache',
        'origin': BASE_URL,
        'pragma': 'no-cache',
        'priority': 'u=1, i',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/130.0.0.0 Safari/537.36',
        'Content-Type': 'application/json',
        'Referer': referer_url,  # Retain referer if necessary
    }
    # NOTE: the old `headers_chat` dict (next-action / next-router-state-tree
    # headers) was built here but never used by any request — removed.

    if request.model == 'o1-preview':
        # Artificial delay mimicking the upstream model's long latency.
        delay_seconds = random.randint(20, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
        await asyncio.sleep(delay_seconds)

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        "id": chat_id,
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [message_to_dict(msg) for msg in request.messages],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
        "visitFromDelta": False,
    }

    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                # CONSISTENCY: the streaming twin uses timeout=100; this call
                # previously relied on httpx's much shorter default timeout.
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    # Strip the upstream sentinel prefix, if present.
    if full_response.startswith("$@$v=undefined-rv1$@$"):
        full_response = full_response[21:]

    cleaned_full_response = strip_model_prefix(full_response)

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }
 
1
+ # Blackbox.py
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
+ from __future__ import annotations
4
 
5
+ from aiohttp import ClientSession
6
+ import uuid
7
+ import json
8
+ import re
9
+
10
+ from ..typing import AsyncResult, Messages, ImageType
11
+ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
12
+ from ..image import ImageResponse, to_data_uri
13
+
14
class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for the Blackbox AI chat endpoint.

    Supports plain chat models, agent and trending-agent modes, optional
    image input, image-generation models, and formatting of embedded
    web-search results.
    """
    label = "Blackbox AI"
    url = "https://www.blackbox.ai"
    api_endpoint = "https://www.blackbox.ai/api/chat"
    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'blackboxai'

    # Models whose responses are scanned for generated image URLs.
    image_models = ['Image Generation', 'repomap']

    # Models passed upstream through the "userSelectedModel" field.
    userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro']

    agentMode = {
        'Image Generation': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
    }

    trendingAgentMode = {
        "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
        "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
        'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
        'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405"},
        #
        'Python Agent': {'mode': True, 'id': "Python Agent"},
        'Java Agent': {'mode': True, 'id': "Java Agent"},
        'JavaScript Agent': {'mode': True, 'id': "JavaScript Agent"},
        'HTML Agent': {'mode': True, 'id': "HTML Agent"},
        'Google Cloud Agent': {'mode': True, 'id': "Google Cloud Agent"},
        'Android Developer': {'mode': True, 'id': "Android Developer"},
        'Swift Developer': {'mode': True, 'id': "Swift Developer"},
        'Next.js Agent': {'mode': True, 'id': "Next.js Agent"},
        'MongoDB Agent': {'mode': True, 'id': "MongoDB Agent"},
        'PyTorch Agent': {'mode': True, 'id': "PyTorch Agent"},
        'React Agent': {'mode': True, 'id': "React Agent"},
        'Xcode Agent': {'mode': True, 'id': "Xcode Agent"},
        'AngularJS Agent': {'mode': True, 'id': "AngularJS Agent"},
        'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
        #
        'repomap': {'mode': True, 'id': "repomap"},
        #
        'Heroku Agent': {'mode': True, 'id': "Heroku Agent"},
        'Godot Agent': {'mode': True, 'id': "Godot Agent"},
        'Go Agent': {'mode': True, 'id': "Go Agent"},
        'Gitlab Agent': {'mode': True, 'id': "Gitlab Agent"},
        'Git Agent': {'mode': True, 'id': "Git Agent"},
        'Flask Agent': {'mode': True, 'id': "Flask Agent"},
        'Firebase Agent': {'mode': True, 'id': "Firebase Agent"},
        'FastAPI Agent': {'mode': True, 'id': "FastAPI Agent"},
        'Erlang Agent': {'mode': True, 'id': "Erlang Agent"},
        'Electron Agent': {'mode': True, 'id': "Electron Agent"},
        'Docker Agent': {'mode': True, 'id': "Docker Agent"},
        'DigitalOcean Agent': {'mode': True, 'id': "DigitalOcean Agent"},
        'Bitbucket Agent': {'mode': True, 'id': "Bitbucket Agent"},
        'Azure Agent': {'mode': True, 'id': "Azure Agent"},
        'Flutter Agent': {'mode': True, 'id': "Flutter Agent"},
        'Youtube Agent': {'mode': True, 'id': "Youtube Agent"},
        'builder Agent': {'mode': True, 'id': "builder Agent"},
    }

    models = [default_model, *userSelectedModel, *list(agentMode.keys()), *list(trendingAgentMode.keys())]

    model_aliases = {
        "gemini-flash": "gemini-1.5-flash",
        "claude-3.5-sonnet": "claude-sonnet-3.5",
        "flux": "Image Generation",
    }

    @staticmethod
    def generate_id() -> str:
        """Generate a UUID-based message ID."""
        return str(uuid.uuid4())

    @classmethod
    def get_model(cls, model: str) -> str:
        """Resolve a requested model name to a supported one.

        Known models pass through, aliases are mapped, everything else
        falls back to the default model.
        """
        if model in cls.models:
            return model
        return cls.model_aliases.get(model, cls.default_model)

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        web_search: bool = False,
        image: ImageType = None,
        image_name: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield the model's response: an ImageResponse for image models,
        otherwise the (optionally search-annotated) text answer.

        Args:
            model: Requested model name (resolved via get_model).
            messages: Chat history in g4f Messages format.
            proxy: Optional HTTP proxy URL for the upstream request.
            web_search: Ask the upstream API to run a web search.
            image: Optional image payload attached to the last message.
            image_name: Title stored alongside the image data.
        """
        model = cls.get_model(model)
        message_id = cls.generate_id()
        messages_processed = messages.copy()

        if image is not None:
            # BUGFIX: list.copy() is shallow, so mutating the last message
            # dict in place leaked the 'data' key into the caller's
            # `messages`.  Replace it with a fresh dict instead.
            messages_processed[-1] = {
                **messages_processed[-1],
                'data': {
                    'fileText': '',
                    'imageBase64': to_data_uri(image),
                    'title': image_name
                },
            }

        headers = {
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.9',
            'Cache-Control': 'no-cache',
            'Content-Type': 'application/json',
            'Origin': cls.url,
            'Pragma': 'no-cache',
            'Priority': 'u=1, i',
            'Referer': f'{cls.url}/',
            'User-Agent': (
                'Mozilla/5.0 (X11; Linux x86_64) '
                'AppleWebKit/537.36 (KHTML, like Gecko) '
                'Chrome/130.0.0.0 Safari/537.36'
            )
        }

        data = {
            "messages": messages_processed,
            "id": message_id,
            "previewToken": None,
            "userId": None,
            "codeModelMode": True,
            # `.get(model, {})` already yields {} for unknown models; the
            # old `... if model in cls.agentMode else {}` guard was a
            # redundant double lookup.
            "agentMode": cls.agentMode.get(model, {}),
            "trendingAgentMode": cls.trendingAgentMode.get(model, {}),
            "isMicMode": False,
            "userSystemPrompt": None,
            "maxTokens": 1024,
            "playgroundTopP": 0.9,
            "playgroundTemperature": 0.5,
            "isChromeExt": False,
            "githubToken": None,
            "clickedAnswer2": False,
            "clickedAnswer3": False,
            "clickedForceWebSearch": False,
            "visitFromDelta": False,
            "mobileClient": False,
            "userSelectedModel": model if model in cls.userSelectedModel else None,
            "webSearchMode": web_search,
            "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc"
        }

        async with ClientSession(headers=headers) as session:
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                response_text = await response.text()

                if model in cls.image_models:
                    # Image models answer with a markdown image link.
                    image_matches = re.findall(r'!\[.*?\]\((https?://[^\)]+)\)', response_text)
                    if image_matches:
                        image_url = image_matches[0]
                        image_response = ImageResponse(images=[image_url], alt="Generated Image")
                        yield image_response
                        return

                # Web-search responses embed JSON results between $~~~$ markers.
                json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', response_text, re.DOTALL)
                if json_match:
                    search_results = json.loads(json_match.group(1))
                    answer = response_text.split('$~~~$')[-1].strip()

                    formatted_response = f"{answer}\n\n**Source:**"
                    for i, result in enumerate(search_results, 1):
                        formatted_response += f"\n{i}. {result['title']}: {result['link']}"

                    yield formatted_response
                else:
                    yield response_text.strip()