Niansuh committed · Commit 4dbcc1a · verified · 1 Parent(s): 7d8a663

Delete api
api/__init__.py DELETED
File without changes
api/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (153 Bytes)
 
api/__pycache__/app.cpython-310.pyc DELETED
Binary file (1.13 kB)
 
api/__pycache__/auth.cpython-310.pyc DELETED
Binary file (594 Bytes)
 
api/__pycache__/config.cpython-310.pyc DELETED
Binary file (1.06 kB)
 
api/__pycache__/dummy.txt DELETED
@@ -1 +0,0 @@
-
 
 
api/__pycache__/logger.cpython-310.pyc DELETED
Binary file (541 Bytes)
 
api/__pycache__/models.cpython-310.pyc DELETED
Binary file (853 Bytes)
 
api/__pycache__/routes.cpython-310.pyc DELETED
Binary file (2.58 kB)
 
api/__pycache__/utils.cpython-310.pyc DELETED
Binary file (4.69 kB)
 
api/app.py DELETED
@@ -1,40 +0,0 @@
-from fastapi import FastAPI, Request
-from starlette.middleware.cors import CORSMiddleware
-from fastapi.responses import JSONResponse
-from api.logger import setup_logger
-from api.routes import router
-
-logger = setup_logger(__name__)
-
-def create_app():
-    app = FastAPI(
-        title="NiansuhAI API Gateway",
-        docs_url=None,  # Disable Swagger UI
-        redoc_url=None,  # Disable ReDoc
-        openapi_url=None,  # Disable OpenAPI schema
-    )
-
-    # CORS settings
-    app.add_middleware(
-        CORSMiddleware,
-        allow_origins=["*"],  # Adjust as needed for security
-        allow_credentials=True,
-        allow_methods=["*"],
-        allow_headers=["*"],
-    )
-
-    # Include routes
-    app.include_router(router)
-
-    # Global exception handler for better error reporting
-    @app.exception_handler(Exception)
-    async def global_exception_handler(request: Request, exc: Exception):
-        logger.error(f"An error occurred: {str(exc)}")
-        return JSONResponse(
-            status_code=500,
-            content={"message": "An internal server error occurred."},
-        )
-
-    return app
-
-app = create_app()
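
Note: the deleted module exposed a standard FastAPI `app` object at module level. A minimal sketch of how it would have been served (uvicorn is an assumption here; no server entry point appears in this commit):

# Hypothetical launcher, not part of the deleted code.
import uvicorn
from api.app import app

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
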
api/auth.py DELETED
@@ -1,10 +0,0 @@
-from fastapi import Depends, HTTPException
-from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
-from api.config import APP_SECRET
-
-security = HTTPBearer()
-
-def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(security)):
-    if credentials.credentials != APP_SECRET:
-        raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
-    return credentials.credentials
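
Note: routes guarded themselves by declaring this function as a dependency, as api/routes.py below still shows. A minimal sketch of that pattern (the endpoint itself is illustrative, not from this commit):

# Illustrative endpoint; only Depends(verify_app_secret) is taken from the deleted code.
from fastapi import APIRouter, Depends
from api.auth import verify_app_secret

router = APIRouter()

@router.get("/protected")
async def protected(app_secret: str = Depends(verify_app_secret)):
    # Reached only when the Bearer token equals APP_SECRET.
    return {"ok": True}
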
api/config.py DELETED
@@ -1,153 +0,0 @@
-import os
-from dotenv import load_dotenv
-
-load_dotenv()
-
-BASE_URL = "https://www.blackbox.ai"
-headers = {
-    'accept': '*/*',
-    'accept-language': 'en-US,en;q=0.9',
-    'origin': 'https://www.blackbox.ai',
-    'priority': 'u=1, i',
-    'sec-ch-ua': '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"',
-    'sec-ch-ua-mobile': '?0',
-    'sec-ch-ua-platform': '"Windows"',
-    'sec-fetch-dest': 'empty',
-    'sec-fetch-mode': 'cors',
-    'sec-fetch-site': 'same-origin',
-    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
-                  'AppleWebKit/537.36 (KHTML, like Gecko) '
-                  'Chrome/130.0.0.0 Safari/537.36',
-}
-APP_SECRET = os.getenv("APP_SECRET")
-
-ALLOWED_MODELS = [
-    {"id": "blackboxai", "name": "blackboxai"},
-    {"id": "blackboxai-pro", "name": "blackboxai-pro"},
-    {"id": "flux", "name": "flux"},
-    {"id": "llama-3.1-8b", "name": "llama-3.1-8b"},
-    {"id": "llama-3.1-70b", "name": "llama-3.1-70b"},
-    {"id": "llama-3.1-405b", "name": "llama-3.1-405b"},
-    {"id": "gpt-4o", "name": "gpt-4o"},
-    {"id": "gemini-pro", "name": "gemini-pro"},
-    {"id": "gemini-1.5-flash", "name": "gemini-1.5-flash"},
-    {"id": "claude-sonnet-3.5", "name": "claude-sonnet-3.5"},
-    {"id": "PythonAgent", "name": "PythonAgent"},
-    {"id": "JavaAgent", "name": "JavaAgent"},
-    {"id": "JavaScriptAgent", "name": "JavaScriptAgent"},
-    {"id": "HTMLAgent", "name": "HTMLAgent"},
-    {"id": "GoogleCloudAgent", "name": "GoogleCloudAgent"},
-    {"id": "AndroidDeveloper", "name": "AndroidDeveloper"},
-    {"id": "SwiftDeveloper", "name": "SwiftDeveloper"},
-    {"id": "Next.jsAgent", "name": "Next.jsAgent"},
-    {"id": "MongoDBAgent", "name": "MongoDBAgent"},
-    {"id": "PyTorchAgent", "name": "PyTorchAgent"},
-    {"id": "ReactAgent", "name": "ReactAgent"},
-    {"id": "XcodeAgent", "name": "XcodeAgent"},
-    {"id": "AngularJSAgent", "name": "AngularJSAgent"},
-    {"id": "RepoMap", "name": "RepoMap"},
-    {"id": "gemini-1.5-pro-latest", "name": "gemini-pro"},
-    {"id": "gemini-1.5-pro", "name": "gemini-1.5-pro"},
-    {"id": "claude-3-5-sonnet-20240620", "name": "claude-sonnet-3.5"},
-    {"id": "claude-3-5-sonnet", "name": "claude-sonnet-3.5"},
-    {"id": "Niansuh", "name": "Niansuh"},
-]
-
-MODEL_MAPPING = {
-    "blackboxai": "blackboxai",
-    "blackboxai-pro": "blackboxai-pro",
-    "flux": "flux",
-    "ImageGeneration": "flux",
-    "llama-3.1-8b": "llama-3.1-8b",
-    "llama-3.1-70b": "llama-3.1-70b",
-    "llama-3.1-405b": "llama-3.1-405b",
-    "gpt-4o": "gpt-4o",
-    "gemini-pro": "gemini-pro",
-    "gemini-1.5-flash": "gemini-1.5-flash",
-    "claude-sonnet-3.5": "claude-sonnet-3.5",
-    "PythonAgent": "PythonAgent",
-    "JavaAgent": "JavaAgent",
-    "JavaScriptAgent": "JavaScriptAgent",
-    "HTMLAgent": "HTMLAgent",
-    "GoogleCloudAgent": "GoogleCloudAgent",
-    "AndroidDeveloper": "AndroidDeveloper",
-    "SwiftDeveloper": "SwiftDeveloper",
-    "Next.jsAgent": "Next.jsAgent",
-    "MongoDBAgent": "MongoDBAgent",
-    "PyTorchAgent": "PyTorchAgent",
-    "ReactAgent": "ReactAgent",
-    "XcodeAgent": "XcodeAgent",
-    "AngularJSAgent": "AngularJSAgent",
-    "RepoMap": "RepoMap",
-    # Additional mappings
-    "gemini-flash": "gemini-1.5-flash",
-    "claude-3.5-sonnet": "claude-sonnet-3.5",
-    "flux": "flux",
-    "gemini-1.5-pro-latest": "gemini-pro",
-    "gemini-1.5-pro": "gemini-1.5-pro",
-    "claude-3-5-sonnet-20240620": "claude-sonnet-3.5",
-    "claude-3-5-sonnet": "claude-sonnet-3.5",
-    "Niansuh": "Niansuh",
-}
-
-# Agent modes
-AGENT_MODE = {
-    'flux': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "flux"},
-    'Niansuh': {'mode': True, 'id': "NiansuhAIk1HgESy", 'name': "Niansuh"},
-
-}
-
-TRENDING_AGENT_MODE = {
-    "blackboxai": {},
-    "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
-    "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
-    'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
-    'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
-    'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
-    'PythonAgent': {'mode': True, 'id': "Python Agent"},
-    'JavaAgent': {'mode': True, 'id': "Java Agent"},
-    'JavaScriptAgent': {'mode': True, 'id': "JavaScript Agent"},
-    'HTMLAgent': {'mode': True, 'id': "HTML Agent"},
-    'GoogleCloudAgent': {'mode': True, 'id': "Google Cloud Agent"},
-    'AndroidDeveloper': {'mode': True, 'id': "Android Developer"},
-    'SwiftDeveloper': {'mode': True, 'id': "Swift Developer"},
-    'Next.jsAgent': {'mode': True, 'id': "Next.js Agent"},
-    'MongoDBAgent': {'mode': True, 'id': "MongoDB Agent"},
-    'PyTorchAgent': {'mode': True, 'id': "PyTorch Agent"},
-    'ReactAgent': {'mode': True, 'id': "React Agent"},
-    'XcodeAgent': {'mode': True, 'id': "Xcode Agent"},
-    'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"},
-    'RepoMap': {'mode': True, 'id': "repomap"},
-}
-
-# Model prefixes
-MODEL_PREFIXES = {
-    'gpt-4o': '@GPT-4o',
-    'gemini-pro': '@Gemini-PRO',
-    'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
-    'PythonAgent': '@Python Agent',
-    'JavaAgent': '@Java Agent',
-    'JavaScriptAgent': '@JavaScript Agent',
-    'HTMLAgent': '@HTML Agent',
-    'GoogleCloudAgent': '@Google Cloud Agent',
-    'AndroidDeveloper': '@Android Developer',
-    'SwiftDeveloper': '@Swift Developer',
-    'Next.jsAgent': '@Next.js Agent',
-    'MongoDBAgent': '@MongoDB Agent',
-    'PyTorchAgent': '@PyTorch Agent',
-    'ReactAgent': '@React Agent',
-    'XcodeAgent': '@Xcode Agent',
-    'AngularJSAgent': '@AngularJS Agent',
-    'blackboxai-pro': '@BLACKBOXAI-PRO',
-    'flux': '@Image Generation',
-    # Add any additional prefixes if necessary
-}
-
-# Model referers
-MODEL_REFERERS = {
-    "blackboxai": "/?model=blackboxai",
-    "gpt-4o": "/?model=gpt-4o",
-    "gemini-pro": "/?model=gemini-pro",
-    "claude-sonnet-3.5": "/?model=claude-sonnet-3.5"
-    # Add any additional referers if necessary
-}
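
Note: these tables compose per request, keyed off the incoming model id, as api/utils.py below shows. A minimal sketch of the lookup chain (values in comments are taken from the dicts above):

from api.config import BASE_URL, MODEL_MAPPING, MODEL_PREFIXES, MODEL_REFERERS

model = "gpt-4o"                             # model id from the incoming request
upstream = MODEL_MAPPING.get(model, model)   # -> "gpt-4o" (sent as userSelectedModel)
prefix = MODEL_PREFIXES.get(model, "")       # -> "@GPT-4o" (prepended to user messages)
referer = BASE_URL + MODEL_REFERERS.get(model, f"/?model={model}")
# -> "https://www.blackbox.ai/?model=gpt-4o"
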
api/gizai.py DELETED
@@ -1,169 +0,0 @@
-# api/gizai.py
-
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-from typing import AsyncGenerator, Union
-
-from .models import AsyncResult, Messages, ImageResponse
-from .utils import strip_model_prefix
-
-class AsyncGeneratorProvider:
-    @classmethod
-    async def create_async_generator(cls, *args, **kwargs) -> AsyncGenerator:
-        """Abstract method to create an asynchronous generator."""
-        raise NotImplementedError
-
-class ProviderModelMixin:
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        """Abstract method to get the actual model name."""
-        raise NotImplementedError
-
-class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://app.giz.ai/assistant/"
-    api_endpoint = "https://app.giz.ai/api/data/users/inferenceServer.infer"
-    working = True
-
-    supports_system_message = True
-    supports_message_history = True
-
-    # Chat models
-    default_model = 'chat-gemini-flash'
-    chat_models = [
-        default_model,
-        'chat-gemini-pro',
-        'chat-gpt4m',
-        'chat-gpt4',
-        'claude-sonnet',
-        'claude-haiku',
-        'llama-3-70b',
-        'llama-3-8b',
-        'mistral-large',
-        'chat-o1-mini'
-    ]
-
-    # Image models
-    image_models = [
-        'flux1',
-        'sdxl',
-        'sd',
-        'sd35',
-    ]
-
-    models = [*chat_models, *image_models]
-
-    model_aliases = {
-        # Chat model aliases
-        "gemini-flash": "chat-gemini-flash",
-        "gemini-pro": "chat-gemini-pro",
-        "gpt-4o-mini": "chat-gpt4m",
-        "gpt-4o": "chat-gpt4",
-        "claude-3.5-sonnet": "claude-sonnet",
-        "claude-3-haiku": "claude-haiku",
-        "llama-3.1-70b": "llama-3-70b",
-        "llama-3.1-8b": "llama-3-8b",
-        "o1-mini": "chat-o1-mini",
-        # Image model aliases
-        "sd-1.5": "sd",
-        "sd-3.5": "sd35",
-        "flux-schnell": "flux1",
-    }
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        """Retrieve the actual model name, handling aliases."""
-        if model in cls.models:
-            return model
-        elif model in cls.model_aliases:
-            return cls.model_aliases[model]
-        else:
-            return cls.default_model
-
-    @classmethod
-    def is_image_model(cls, model: str) -> bool:
-        """Determine if the given model is an image generation model."""
-        return model in cls.image_models
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        """Create an asynchronous generator for processing requests."""
-        model = cls.get_model(model)
-
-        headers = {
-            'Accept': 'application/json, text/plain, */*',
-            'Accept-Language': 'en-US,en;q=0.9',
-            'Cache-Control': 'no-cache',
-            'Connection': 'keep-alive',
-            'Content-Type': 'application/json',
-            'Origin': 'https://app.giz.ai',
-            'Pragma': 'no-cache',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-origin',
-            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
-            'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Linux"'
-        }
-
-        async with ClientSession() as session:
-            if cls.is_image_model(model):
-                # Image generation
-                prompt = messages[-1]["content"]
-                data = {
-                    "model": model,
-                    "input": {
-                        "width": "1024",
-                        "height": "1024",
-                        "steps": 4,
-                        "output_format": "webp",
-                        "batch_size": 1,
-                        "mode": "plan",
-                        "prompt": prompt
-                    }
-                }
-                async with session.post(
-                    cls.api_endpoint,
-                    headers=headers,
-                    data=json.dumps(data),
-                    proxy=proxy
-                ) as response:
-                    response.raise_for_status()
-                    response_data = await response.json()
-                    if response_data.get('status') == 'completed' and response_data.get('output'):
-                        for url in response_data['output']:
-                            yield ImageResponse(images=url, alt="Generated Image")
-            else:
-                # Chat completion
-                # Directly format the prompt without using a separate helper
-                prompt = "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages])
-                data = {
-                    "model": model,
-                    "input": {
-                        "messages": [
-                            {
-                                "type": "human",
-                                "content": prompt
-                            }
-                        ],
-                        "mode": "plan"
-                    },
-                    "noStream": True
-                }
-                async with session.post(
-                    cls.api_endpoint,
-                    headers=headers,
-                    data=json.dumps(data),
-                    proxy=proxy
-                ) as response:
-                    response.raise_for_status()
-                    result = await response.json()
-                    yield result.get('output', '')
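
Note: a minimal sketch of driving this provider (the message shape follows the role/content dicts the code above reads; the driver script itself is illustrative and makes a live network call):

# Illustrative driver, not part of the deleted file.
import asyncio
from api.gizai import GizAI

async def main():
    messages = [{"role": "user", "content": "Hello"}]
    # "gemini-flash" is resolved through model_aliases to "chat-gemini-flash".
    async for chunk in GizAI.create_async_generator("gemini-flash", messages):
        print(chunk)

asyncio.run(main())
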
api/logger.py DELETED
@@ -1,20 +0,0 @@
-import logging
-
-def setup_logger(name):
-    logger = logging.getLogger(name)
-    if not logger.handlers:
-        logger.setLevel(logging.INFO)
-        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-
-        # Console handler
-        console_handler = logging.StreamHandler()
-        console_handler.setFormatter(formatter)
-        logger.addHandler(console_handler)
-
-        # File Handler - Error Level
-        # error_file_handler = logging.FileHandler('error.log')
-        # error_file_handler.setFormatter(formatter)
-        # error_file_handler.setLevel(logging.ERROR)
-        # logger.addHandler(error_file_handler)
-
-    return logger
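
Note: the `if not logger.handlers` guard makes the call idempotent, so every module could safely do the following without duplicating handlers or log lines:

from api.logger import setup_logger

log = setup_logger(__name__)
log.info("gateway ready")  # -> "<asctime> - __main__ - INFO - gateway ready"
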
api/models.py DELETED
@@ -1,14 +0,0 @@
-from typing import List, Optional
-from pydantic import BaseModel
-
-class Message(BaseModel):
-    role: str
-    content: str | list
-
-class ChatRequest(BaseModel):
-    model: str
-    messages: List[Message]
-    stream: Optional[bool] = False
-    temperature: Optional[float] = 0.7
-    top_p: Optional[float] = 0.9
-    max_tokens: Optional[int] = 99999999
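
Note: this is the OpenAI-style request schema the gateway validated. An illustrative payload it would have parsed (values are examples, not from this commit):

from api.models import ChatRequest, Message

req = ChatRequest(
    model="gpt-4o",
    messages=[Message(role="user", content="Hi")],
    stream=True,
)
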
api/provider/blackboxai.py DELETED
@@ -1,128 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from ..image import ImageResponse
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-class BlackBoxAI(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://api.blackboxai.com/assistant/"
-    api_endpoint = "https://api.blackboxai.com/api/data/users/inferenceServer.infer"
-    working = True
-
-    supports_system_message = True
-    supports_message_history = True
-
-    # Chat models
-    default_model = 'chat-blackboxai-basic'
-    chat_models = [
-        default_model,
-        # Add other chat models as needed
-    ]
-
-    # Image models
-    image_models = [
-        'bbai-sdxl',
-        # Add other image models as needed
-    ]
-
-    models = [*chat_models, *image_models]
-
-    model_aliases = {
-        # Define aliases as needed
-    }
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        elif model in cls.model_aliases:
-            return cls.model_aliases[model]
-        else:
-            return cls.default_model
-
-    @classmethod
-    def is_image_model(cls, model: str) -> bool:
-        return model in cls.image_models
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-
-        headers = {
-            'Accept': 'application/json, text/plain, */*',
-            'Accept-Language': 'en-US,en;q=0.9',
-            'Cache-Control': 'no-cache',
-            'Connection': 'keep-alive',
-            'Content-Type': 'application/json',
-            'Origin': 'https://api.blackboxai.com',
-            'Pragma': 'no-cache',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-origin',
-            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
-            'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Linux"'
-        }
-
-        async with ClientSession() as session:
-            if cls.is_image_model(model):
-                # Image generation
-                prompt = messages[-1]["content"]
-                data = {
-                    "model": model,
-                    "input": {
-                        "width": "1024",
-                        "height": "1024",
-                        "steps": 4,
-                        "output_format": "webp",
-                        "batch_size": 1,
-                        "mode": "plan",
-                        "prompt": prompt
-                    }
-                }
-                async with session.post(
-                    cls.api_endpoint,
-                    headers=headers,
-                    data=json.dumps(data),
-                    proxy=proxy
-                ) as response:
-                    response.raise_for_status()
-                    response_data = await response.json()
-                    if response_data.get('status') == 'completed' and response_data.get('output'):
-                        for url in response_data['output']:
-                            yield ImageResponse(images=url, alt="Generated Image")
-            else:
-                # Chat completion
-                data = {
-                    "model": model,
-                    "input": {
-                        "messages": [
-                            {
-                                "type": "human",
-                                "content": format_prompt(messages)
-                            }
-                        ],
-                        "mode": "plan"
-                    },
-                    "noStream": True
-                }
-                async with session.post(
-                    cls.api_endpoint,
-                    headers=headers,
-                    data=json.dumps(data),
-                    proxy=proxy
-                ) as response:
-                    response.raise_for_status()
-                    result = await response.json()
-                    yield result.get('output', '')
api/provider/gizai.py DELETED
@@ -1,151 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from ..image import ImageResponse
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://app.giz.ai/assistant/"
-    api_endpoint = "https://app.giz.ai/api/data/users/inferenceServer.infer"
-    working = True
-
-    supports_system_message = True
-    supports_message_history = True
-
-    # Chat models
-    default_model = 'chat-gemini-flash'
-    chat_models = [
-        default_model,
-        'chat-gemini-pro',
-        'chat-gpt4m',
-        'chat-gpt4',
-        'claude-sonnet',
-        'claude-haiku',
-        'llama-3-70b',
-        'llama-3-8b',
-        'mistral-large',
-        'chat-o1-mini'
-    ]
-
-    # Image models
-    image_models = [
-        'flux1',
-        'sdxl',
-        'sd',
-        'sd35',
-    ]
-
-    models = [*chat_models, *image_models]
-
-    model_aliases = {
-        # Chat model aliases
-        "gemini-flash": "chat-gemini-flash",
-        "gemini-pro": "chat-gemini-pro",
-        "gpt-4o-mini": "chat-gpt4m",
-        "gpt-4o": "chat-gpt4",
-        "claude-3.5-sonnet": "claude-sonnet",
-        "claude-3-haiku": "claude-haiku",
-        "llama-3.1-70b": "llama-3-70b",
-        "llama-3.1-8b": "llama-3-8b",
-        "o1-mini": "chat-o1-mini",
-        # Image model aliases
-        "sd-1.5": "sd",
-        "sd-3.5": "sd35",
-        "flux-schnell": "flux1",
-    }
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        elif model in cls.model_aliases:
-            return cls.model_aliases[model]
-        else:
-            return cls.default_model
-
-    @classmethod
-    def is_image_model(cls, model: str) -> bool:
-        return model in cls.image_models
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-
-        headers = {
-            'Accept': 'application/json, text/plain, */*',
-            'Accept-Language': 'en-US,en;q=0.9',
-            'Cache-Control': 'no-cache',
-            'Connection': 'keep-alive',
-            'Content-Type': 'application/json',
-            'Origin': 'https://app.giz.ai',
-            'Pragma': 'no-cache',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-origin',
-            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
-            'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Linux"'
-        }
-
-        async with ClientSession() as session:
-            if cls.is_image_model(model):
-                # Image generation
-                prompt = messages[-1]["content"]
-                data = {
-                    "model": model,
-                    "input": {
-                        "width": "1024",
-                        "height": "1024",
-                        "steps": 4,
-                        "output_format": "webp",
-                        "batch_size": 1,
-                        "mode": "plan",
-                        "prompt": prompt
-                    }
-                }
-                async with session.post(
-                    cls.api_endpoint,
-                    headers=headers,
-                    data=json.dumps(data),
-                    proxy=proxy
-                ) as response:
-                    response.raise_for_status()
-                    response_data = await response.json()
-                    if response_data.get('status') == 'completed' and response_data.get('output'):
-                        for url in response_data['output']:
-                            yield ImageResponse(images=url, alt="Generated Image")
-            else:
-                # Chat completion
-                data = {
-                    "model": model,
-                    "input": {
-                        "messages": [
-                            {
-                                "type": "human",
-                                "content": format_prompt(messages)
-                            }
-                        ],
-                        "mode": "plan"
-                    },
-                    "noStream": True
-                }
-                async with session.post(
-                    cls.api_endpoint,
-                    headers=headers,
-                    data=json.dumps(data),
-                    proxy=proxy
-                ) as response:
-                    response.raise_for_status()
-                    result = await response.json()
-                    yield result.get('output', '')
api/providers/blackboxai.py DELETED
@@ -1,216 +0,0 @@
-# api/providers/blackboxai.py
-
-from __future__ import annotations
-
-import json
-from datetime import datetime
-import uuid
-from typing import Any, Dict, Optional
-
-import httpx
-from fastapi import HTTPException
-from api.config import (
-    MODEL_MAPPING,
-    headers,
-    BASE_URL,
-    MODEL_PREFIXES,
-    MODEL_REFERERS,
-)
-from api.models import ChatRequest
-from api.logger import setup_logger
-from api.image import ImageResponse  # Assuming similar structure to GizAI
-from api.typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-
-logger = setup_logger(__name__)
-
-class BlackBoxAI(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://www.blackbox.ai"
-    api_endpoint = "https://www.blackbox.ai/api/chat"
-    working = True
-
-    supports_system_message = True
-    supports_message_history = True
-
-    # Define BlackBoxAI models
-    default_model = 'blackboxai'
-    chat_models = [
-        'blackboxai',
-        'blackboxai-pro',
-        'flux',
-        'llama-3.1-8b',
-        'llama-3.1-70b',
-        'llama-3.1-405b',
-        'gpt-4o',
-        'gemini-pro',
-        'gemini-1.5-flash',
-        'claude-sonnet-3.5',
-        'PythonAgent',
-        'JavaAgent',
-        'JavaScriptAgent',
-        'HTMLAgent',
-        'GoogleCloudAgent',
-        'AndroidDeveloper',
-        'SwiftDeveloper',
-        'Next.jsAgent',
-        'MongoDBAgent',
-        'PyTorchAgent',
-        'ReactAgent',
-        'XcodeAgent',
-        'AngularJSAgent',
-        'RepoMap',
-        'gemini-1.5-pro-latest',
-        'gemini-1.5-pro',
-        'claude-3-5-sonnet-20240620',
-        'claude-3-5-sonnet',
-        'Niansuh',
-    ]
-
-    image_models = []  # Add image models if applicable
-
-    models = chat_models + image_models
-
-    model_aliases = {
-        # Add aliases if any
-    }
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        return MODEL_MAPPING.get(model, cls.default_model)
-
-    @classmethod
-    def is_image_model(cls, model: str) -> bool:
-        return model in cls.image_models
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-        model_prefix = MODEL_PREFIXES.get(model, "")
-        referer_path = MODEL_REFERERS.get(model, f"/?model={model}")
-        referer_url = f"{BASE_URL}{referer_path}"
-
-        # Update headers with dynamic Referer
-        dynamic_headers = headers.copy()
-        dynamic_headers['Referer'] = referer_url
-
-        json_data = {
-            "messages": [cls.message_to_dict(msg, model_prefix) for msg in messages],
-            "stream": kwargs.get('stream', False),
-            "temperature": kwargs.get('temperature', 0.7),
-            "top_p": kwargs.get('top_p', 0.9),
-            "max_tokens": kwargs.get('max_tokens', 99999999),
-        }
-
-        async with httpx.AsyncClient() as client:
-            try:
-                if json_data.get("stream"):
-                    async with client.stream(
-                        "POST",
-                        cls.api_endpoint,
-                        headers=dynamic_headers,
-                        json=json_data,
-                        timeout=100,
-                    ) as response:
-                        response.raise_for_status()
-                        async for line in response.aiter_lines():
-                            timestamp = int(datetime.now().timestamp())
-                            if line:
-                                content = line
-                                if content.startswith("$@$v=undefined-rv1$@$"):
-                                    content = content[21:]
-                                # Strip the model prefix from the response content
-                                cleaned_content = cls.strip_model_prefix(content, model_prefix)
-                                yield f"data: {json.dumps(cls.create_chat_completion_data(cleaned_content, model, timestamp))}\n\n"
-
-                        yield f"data: {json.dumps(cls.create_chat_completion_data('', model, timestamp, 'stop'))}\n\n"
-                        yield "data: [DONE]\n\n"
-                else:
-                    response = await client.post(
-                        cls.api_endpoint,
-                        headers=dynamic_headers,
-                        json=json_data,
-                        timeout=100,
-                    )
-                    response.raise_for_status()
-                    full_response = response.text
-                    if full_response.startswith("$@$v=undefined-rv1$@$"):
-                        full_response = full_response[21:]
-                    # Strip the model prefix from the full response
-                    cleaned_full_response = cls.strip_model_prefix(full_response, model_prefix)
-                    yield {  # async generators cannot return a value; yield the completed response
-                        "id": f"chatcmpl-{uuid.uuid4()}",
-                        "object": "chat.completion",
-                        "created": int(datetime.now().timestamp()),
-                        "model": model,
-                        "choices": [
-                            {
-                                "index": 0,
-                                "message": {"role": "assistant", "content": cleaned_full_response},
-                                "finish_reason": "stop",
-                            }
-                        ],
-                        "usage": None,
-                    }
-            except httpx.HTTPStatusError as e:
-                logger.error(f"HTTP error occurred: {e}")
-                raise HTTPException(status_code=e.response.status_code, detail=str(e))
-            except httpx.RequestError as e:
-                logger.error(f"Error occurred during request: {e}")
-                raise HTTPException(status_code=500, detail=str(e))
-
-    @staticmethod
-    def message_to_dict(message, model_prefix: Optional[str] = None):
-        if isinstance(message["content"], str):
-            content = message["content"]
-            if model_prefix:
-                content = f"{model_prefix} {content}"
-            return {"role": message["role"], "content": content}
-        elif isinstance(message["content"], list) and len(message["content"]) == 2:
-            content = message["content"][0]["text"]
-            if model_prefix:
-                content = f"{model_prefix} {content}"
-            return {
-                "role": message["role"],
-                "content": content,
-                "data": {
-                    "imageBase64": message["content"][1]["image_url"]["url"],
-                    "fileText": "",
-                    "title": "snapshot",
-                },
-            }
-        else:
-            return {"role": message["role"], "content": message["content"]}
-
-    @staticmethod
-    def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
-        """Remove the model prefix from the response content if present."""
-        if model_prefix and content.startswith(model_prefix):
-            logger.debug(f"Stripping prefix '{model_prefix}' from content.")
-            return content[len(model_prefix):].strip()
-        logger.debug("No prefix to strip from content.")
-        return content
-
-    @staticmethod
-    def create_chat_completion_data(
-        content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
-    ) -> Dict[str, Any]:
-        return {
-            "id": f"chatcmpl-{uuid.uuid4()}",
-            "object": "chat.completion.chunk",
-            "created": timestamp,
-            "model": model,
-            "choices": [
-                {
-                    "index": 0,
-                    "delta": {"content": content, "role": "assistant"},
-                    "finish_reason": finish_reason,
-                }
-            ],
-            "usage": None,
-        }
api/routes.py DELETED
@@ -1,59 +0,0 @@
-import json
-from fastapi import APIRouter, Depends, HTTPException, Request, Response
-from fastapi.responses import StreamingResponse
-from api.auth import verify_app_secret
-from api.config import ALLOWED_MODELS
-from api.models import ChatRequest
-from api.utils import process_non_streaming_response, process_streaming_response
-from api.logger import setup_logger
-
-logger = setup_logger(__name__)
-
-router = APIRouter()
-
-@router.options("/v1/chat/completions")
-@router.options("/api/v1/chat/completions")
-async def chat_completions_options():
-    return Response(
-        status_code=200,
-        headers={
-            "Access-Control-Allow-Origin": "*",
-            "Access-Control-Allow-Methods": "POST, OPTIONS",
-            "Access-Control-Allow-Headers": "Content-Type, Authorization",
-        },
-    )
-
-@router.get("/v1/models")
-@router.get("/api/v1/models")
-async def list_models():
-    return {"object": "list", "data": ALLOWED_MODELS}
-
-@router.post("/v1/chat/completions")
-@router.post("/api/v1/chat/completions")
-async def chat_completions(
-    request: ChatRequest, app_secret: str = Depends(verify_app_secret)
-):
-    logger.info("Entering chat_completions route")
-    logger.info(f"Processing chat completion request for model: {request.model}")
-
-    if request.model not in [model["id"] for model in ALLOWED_MODELS]:
-        raise HTTPException(
-            status_code=400,
-            detail=f"Model {request.model} is not allowed. Allowed models are: {', '.join(model['id'] for model in ALLOWED_MODELS)}",
-        )
-
-    if request.stream:
-        logger.info("Streaming response")
-        return StreamingResponse(process_streaming_response(request), media_type="text/event-stream")
-    else:
-        logger.info("Non-streaming response")
-        return await process_non_streaming_response(request)
-
-@router.route('/')
-@router.route('/healthz')
-@router.route('/ready')
-@router.route('/alive')
-@router.route('/status')
-@router.get("/health")
-def health_check(request: Request):
-    return Response(content=json.dumps({"status": "ok"}), media_type="application/json")
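
Note: putting routes, auth, and config together, a client call against the deleted gateway would have looked roughly like this (host, port, and secret are placeholders):

# Illustrative client; the endpoint path and Bearer auth come from the routes above.
import httpx

resp = httpx.post(
    "http://localhost:8000/v1/chat/completions",
    headers={"Authorization": "Bearer <APP_SECRET>"},
    json={"model": "gpt-4o", "messages": [{"role": "user", "content": "Hi"}]},
)
print(resp.json())
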
api/utils.py DELETED
@@ -1,198 +0,0 @@
-from datetime import datetime
-import json
-from typing import Any, Dict, Optional
-
-import httpx
-from api.config import (
-    MODEL_MAPPING,
-    headers,
-    AGENT_MODE,
-    TRENDING_AGENT_MODE,
-    BASE_URL,
-    MODEL_PREFIXES,
-    MODEL_REFERERS
-)
-from fastapi import HTTPException
-from api.models import ChatRequest
-
-from api.logger import setup_logger
-
-import uuid  # Added import for uuid
-
-logger = setup_logger(__name__)
-
-def create_chat_completion_data(
-    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
-) -> Dict[str, Any]:
-    return {
-        "id": f"chatcmpl-{uuid.uuid4()}",
-        "object": "chat.completion.chunk",
-        "created": timestamp,
-        "model": model,
-        "choices": [
-            {
-                "index": 0,
-                "delta": {"content": content, "role": "assistant"},
-                "finish_reason": finish_reason,
-            }
-        ],
-        "usage": None,
-    }
-
-def message_to_dict(message, model_prefix: Optional[str] = None):
-    if isinstance(message.content, str):
-        content = message.content
-        if model_prefix:
-            content = f"{model_prefix} {content}"
-        return {"role": message.role, "content": content}
-    elif isinstance(message.content, list) and len(message.content) == 2:
-        content = message.content[0]["text"]
-        if model_prefix:
-            content = f"{model_prefix} {content}"
-        return {
-            "role": message.role,
-            "content": content,
-            "data": {
-                "imageBase64": message.content[1]["image_url"]["url"],
-                "fileText": "",
-                "title": "snapshot",
-            },
-        }
-    else:
-        return {"role": message.role, "content": message.content}
-
-def strip_model_prefix(content: str, model_prefix: str) -> str:
-    """Remove the model prefix from the response content if present."""
-    if content.startswith(model_prefix):
-        return content[len(model_prefix):].strip()
-    return content
-
-async def process_streaming_response(request: ChatRequest):
-    agent_mode = AGENT_MODE.get(request.model, {})
-    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
-    model_prefix = MODEL_PREFIXES.get(request.model, "")
-    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
-    referer_url = f"{BASE_URL}{referer_path}"
-
-    # Update headers with dynamic Referer
-    dynamic_headers = headers.copy()
-    dynamic_headers['Referer'] = referer_url
-
-    json_data = {
-        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
-        "previewToken": None,
-        "userId": None,
-        "codeModelMode": True,
-        "agentMode": agent_mode,
-        "trendingAgentMode": trending_agent_mode,
-        "isMicMode": False,
-        "userSystemPrompt": None,
-        "maxTokens": request.max_tokens,
-        "playgroundTopP": request.top_p,
-        "playgroundTemperature": request.temperature,
-        "isChromeExt": False,
-        "githubToken": None,
-        "clickedAnswer2": False,
-        "clickedAnswer3": False,
-        "clickedForceWebSearch": False,
-        "visitFromDelta": False,
-        "mobileClient": False,
-        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
-    }
-
-    async with httpx.AsyncClient() as client:
-        try:
-            async with client.stream(
-                "POST",
-                f"{BASE_URL}/api/chat",
-                headers=dynamic_headers,
-                json=json_data,
-                timeout=100,
-            ) as response:
-                response.raise_for_status()
-                async for line in response.aiter_lines():
-                    timestamp = int(datetime.now().timestamp())
-                    if line:
-                        content = line
-                        if content.startswith("$@$v=undefined-rv1$@$"):
-                            content = content[21:]
-                        # Strip the model prefix from the response content
-                        cleaned_content = strip_model_prefix(content, model_prefix)
-                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
-
-                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
-                yield "data: [DONE]\n\n"
-        except httpx.HTTPStatusError as e:
-            logger.error(f"HTTP error occurred: {e}")
-            raise HTTPException(status_code=e.response.status_code, detail=str(e))
-        except httpx.RequestError as e:
-            logger.error(f"Error occurred during request: {e}")
-            raise HTTPException(status_code=500, detail=str(e))
-
-async def process_non_streaming_response(request: ChatRequest):
-    agent_mode = AGENT_MODE.get(request.model, {})
-    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
-    model_prefix = MODEL_PREFIXES.get(request.model, "")
-    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
-    referer_url = f"{BASE_URL}{referer_path}"
-
-    # Update headers with dynamic Referer
-    dynamic_headers = headers.copy()
-    dynamic_headers['Referer'] = referer_url
-
-    json_data = {
-        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
-        "previewToken": None,
-        "userId": None,
-        "codeModelMode": True,
-        "agentMode": agent_mode,
-        "trendingAgentMode": trending_agent_mode,
-        "isMicMode": False,
-        "userSystemPrompt": None,
-        "maxTokens": request.max_tokens,
-        "playgroundTopP": request.top_p,
-        "playgroundTemperature": request.temperature,
-        "isChromeExt": False,
-        "githubToken": None,
-        "clickedAnswer2": False,
-        "clickedAnswer3": False,
-        "clickedForceWebSearch": False,
-        "visitFromDelta": False,
-        "mobileClient": False,
-        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
-    }
-    full_response = ""
-    async with httpx.AsyncClient() as client:
-        try:
-            async with client.stream(
-                method="POST", url=f"{BASE_URL}/api/chat", headers=dynamic_headers, json=json_data
-            ) as response:
-                response.raise_for_status()
-                async for chunk in response.aiter_text():
-                    full_response += chunk
-        except httpx.HTTPStatusError as e:
-            logger.error(f"HTTP error occurred: {e}")
-            raise HTTPException(status_code=e.response.status_code, detail=str(e))
-        except httpx.RequestError as e:
-            logger.error(f"Error occurred during request: {e}")
-            raise HTTPException(status_code=500, detail=str(e))
-    if full_response.startswith("$@$v=undefined-rv1$@$"):
-        full_response = full_response[21:]
-
-    # Strip the model prefix from the full response
-    cleaned_full_response = strip_model_prefix(full_response, model_prefix)
-
-    return {
-        "id": f"chatcmpl-{uuid.uuid4()}",
-        "object": "chat.completion",
-        "created": int(datetime.now().timestamp()),
-        "model": request.model,
-        "choices": [
-            {
-                "index": 0,
-                "message": {"role": "assistant", "content": cleaned_full_response},
-                "finish_reason": "stop",
-            }
-        ],
-        "usage": None,
-    }
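
Note: each streamed SSE line wraps an OpenAI-style chunk built by create_chat_completion_data. A minimal sketch of the shape (the uuid and timestamp vary per call):

from api.utils import create_chat_completion_data

chunk = create_chat_completion_data("Hello", "gpt-4o", 1730000000)
# chunk["object"] == "chat.completion.chunk"
# chunk["choices"][0]["delta"] == {"content": "Hello", "role": "assistant"}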