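"""Blackbox AI chat provider.

Async generator provider for https://www.blackbox.ai that mirrors the
OpenAI chat-completion response shape for both streaming and
non-streaming requests.
"""
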
from __future__ import annotations

import json
import uuid
from datetime import datetime
from typing import Any, Dict, Optional

import httpx
# HTTPException is raised in the error handlers below; FastAPI is assumed
# to be the surrounding web framework.
from fastapi import HTTPException

from api.config import (
    MODEL_MAPPING,
    headers,
    BASE_URL,
    MODEL_PREFIXES,
    MODEL_REFERERS,
)
from api.logger import setup_logger
from api.typing import AsyncResult, Messages

from .base_provider import AsyncGeneratorProvider, ProviderModelMixin

logger = setup_logger(__name__)
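
# Sentinel string that Blackbox prepends to response bodies; it is stripped
# before content is forwarded (see create_async_generator below).
BLACKBOX_SENTINEL = "$@$v=undefined-rv1$@$"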


class BlackBoxAI(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://www.blackbox.ai"
    api_endpoint = "https://www.blackbox.ai/api/chat"
    working = True

    supports_system_message = True
    supports_message_history = True

    default_model = 'blackboxai'
    chat_models = [
        'blackboxai',
        'blackboxai-pro',
        'flux',
        'llama-3.1-8b',
        'llama-3.1-70b',
        'llama-3.1-405b',
        'gpt-4o',
        'gemini-pro',
        'gemini-1.5-flash',
        'claude-sonnet-3.5',
        'PythonAgent',
        'JavaAgent',
        'JavaScriptAgent',
        'HTMLAgent',
        'GoogleCloudAgent',
        'AndroidDeveloper',
        'SwiftDeveloper',
        'Next.jsAgent',
        'MongoDBAgent',
        'PyTorchAgent',
        'ReactAgent',
        'XcodeAgent',
        'AngularJSAgent',
        'RepoMap',
        'gemini-1.5-pro-latest',
        'gemini-1.5-pro',
        'claude-3-5-sonnet-20240620',
        'claude-3-5-sonnet',
        'Niansuh',
    ]

    image_models = []

    models = chat_models + image_models

    model_aliases = {}

    @classmethod
    def get_model(cls, model: str) -> str:
        """Resolve a requested model name via MODEL_MAPPING, falling back to the default."""
        return MODEL_MAPPING.get(model, cls.default_model)

    @classmethod
    def is_image_model(cls, model: str) -> bool:
        return model in cls.image_models

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: Optional[str] = None,
        **kwargs
    ) -> AsyncResult:
        """Stream (or fetch in one shot) a chat completion from Blackbox.

        Yields OpenAI-style SSE chunks when ``stream`` is truthy; otherwise
        yields a single chat-completion dict.
        """
        model = cls.get_model(model)
        model_prefix = MODEL_PREFIXES.get(model, "")
        referer_path = MODEL_REFERERS.get(model, f"/?model={model}")
        referer_url = f"{BASE_URL}{referer_path}"

        # The Referer header is set per model (see MODEL_REFERERS).
        dynamic_headers = headers.copy()
        dynamic_headers['Referer'] = referer_url

        json_data = {
            "messages": [cls.message_to_dict(msg, model_prefix) for msg in messages],
            "stream": kwargs.get('stream', False),
            "temperature": kwargs.get('temperature', 0.7),
            "top_p": kwargs.get('top_p', 0.9),
            "max_tokens": kwargs.get('max_tokens', 99999999),
        }

        # httpx >= 0.26 accepts a single `proxy=` URL; older versions use
        # `proxies=` instead.
        async with httpx.AsyncClient(proxy=proxy) as client:
            try:
                if json_data.get("stream"):
                    async with client.stream(
                        "POST",
                        cls.api_endpoint,
                        headers=dynamic_headers,
                        json=json_data,
                        timeout=100,
                    ) as response:
                        response.raise_for_status()
                        # Initialized up front so the final "stop" chunk has a
                        # timestamp even when the response body is empty.
                        timestamp = int(datetime.now().timestamp())
                        async for line in response.aiter_lines():
                            timestamp = int(datetime.now().timestamp())
                            if line:
                                content = line
                                if content.startswith(BLACKBOX_SENTINEL):
                                    content = content[len(BLACKBOX_SENTINEL):]

                                cleaned_content = cls.strip_model_prefix(content, model_prefix)
                                yield f"data: {json.dumps(cls.create_chat_completion_data(cleaned_content, model, timestamp))}\n\n"

                        yield f"data: {json.dumps(cls.create_chat_completion_data('', model, timestamp, 'stop'))}\n\n"
                        yield "data: [DONE]\n\n"
                else:
                    response = await client.post(
                        cls.api_endpoint,
                        headers=dynamic_headers,
                        json=json_data,
                        timeout=100,
                    )
                    response.raise_for_status()
                    full_response = response.text
                    if full_response.startswith(BLACKBOX_SENTINEL):
                        full_response = full_response[len(BLACKBOX_SENTINEL):]

                    cleaned_full_response = cls.strip_model_prefix(full_response, model_prefix)
                    # `return <value>` is a SyntaxError inside an async
                    # generator, so the completed response is yielded instead.
                    yield {
                        "id": f"chatcmpl-{uuid.uuid4()}",
                        "object": "chat.completion",
                        "created": int(datetime.now().timestamp()),
                        "model": model,
                        "choices": [
                            {
                                "index": 0,
                                "message": {"role": "assistant", "content": cleaned_full_response},
                                "finish_reason": "stop",
                            }
                        ],
                        "usage": None,
                    }
            except httpx.HTTPStatusError as e:
                logger.error(f"HTTP error occurred: {e}")
                raise HTTPException(status_code=e.response.status_code, detail=str(e))
            except httpx.RequestError as e:
                logger.error(f"Error occurred during request: {e}")
                raise HTTPException(status_code=500, detail=str(e))

    @staticmethod
    def message_to_dict(message, model_prefix: Optional[str] = None):
        """Convert a chat message into the payload shape Blackbox expects."""
        if isinstance(message["content"], str):
            content = message["content"]
            if model_prefix:
                content = f"{model_prefix} {content}"
            return {"role": message["role"], "content": content}
        elif isinstance(message["content"], list) and len(message["content"]) == 2:
            # Vision-style message: [text part, image part].
            content = message["content"][0]["text"]
            if model_prefix:
                content = f"{model_prefix} {content}"
            return {
                "role": message["role"],
                "content": content,
                "data": {
                    "imageBase64": message["content"][1]["image_url"]["url"],
                    "fileText": "",
                    "title": "snapshot",
                },
            }
        else:
            return {"role": message["role"], "content": message["content"]}

    @staticmethod
    def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
        """Remove the model prefix from the response content if present."""
        if model_prefix and content.startswith(model_prefix):
            logger.debug(f"Stripping prefix '{model_prefix}' from content.")
            return content[len(model_prefix):].strip()
        logger.debug("No prefix to strip from content.")
        return content

    @staticmethod
    def create_chat_completion_data(
        content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
    ) -> Dict[str, Any]:
        """Build an OpenAI-style chat.completion.chunk payload."""
        return {
            "id": f"chatcmpl-{uuid.uuid4()}",
            "object": "chat.completion.chunk",
            "created": timestamp,
            "model": model,
            "choices": [
                {
                    "index": 0,
                    "delta": {"content": content, "role": "assistant"},
                    "finish_reason": finish_reason,
                }
            ],
            "usage": None,
        }
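

# Usage sketch (illustrative only): drives the generator directly, assuming
# `api.config.headers` holds valid request headers. The message format matches
# what `message_to_dict` expects.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        messages = [{"role": "user", "content": "Hello"}]
        async for chunk in BlackBoxAI.create_async_generator(
            "blackboxai", messages, stream=True
        ):
            print(chunk, end="")

    asyncio.run(_demo())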