Niansuh committed · verified
Commit 34226fa · Parent(s): 4b6f3a0

Update main.py

Files changed (1): main.py (+39 -23)
main.py CHANGED
@@ -3,15 +3,22 @@ from __future__ import annotations
 import re
 import random
 import string
-import uuid
 import json
+import uuid
 from aiohttp import ClientSession
 from fastapi import FastAPI, HTTPException
 from pydantic import BaseModel
-from typing import List, Dict, Any, Optional, AsyncGenerator
+from typing import List, Dict, Any, Optional
 from datetime import datetime
 from fastapi.responses import StreamingResponse
 
+# Custom exception for model not working
+class ModelNotWorkingException(Exception):
+    def __init__(self, model: str):
+        self.model = model
+        self.message = f"The model '{model}' is currently not working. Please wait for NiansuhAI to fix this. Thank you for your patience."
+        super().__init__(self.message)
+
 # Mock implementations for ImageResponse and to_data_uri
 class ImageResponse:
     def __init__(self, url: str, alt: str):
@@ -19,7 +26,6 @@ class ImageResponse:
         self.alt = alt
 
 def to_data_uri(image: Any) -> str:
-    # Placeholder for actual image encoding
     return "data:image/png;base64,..."  # Replace with actual base64 data
 
 class AsyncGeneratorProvider:
@@ -35,7 +41,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     supports_stream = True
     supports_system_message = True
     supports_message_history = True
-
+
     default_model = 'blackbox'
     models = [
         'blackbox',
@@ -60,13 +66,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
         'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
     }
-
+
     userSelectedModel = {
         "gpt-4o": "gpt-4o",
         "gemini-pro": "gemini-pro",
        'claude-sonnet-3.5': "claude-sonnet-3.5",
     }
-
+
     model_aliases = {
         "gemini-flash": "gemini-1.5-flash",
         "flux": "ImageGenerationLV45LJp",
@@ -92,12 +98,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         image: Optional[Any] = None,
         image_name: Optional[str] = None,
         **kwargs
-    ) -> AsyncGenerator:
-        if not messages:
-            raise ValueError("Messages cannot be empty")
-
+    ) -> Any:
         model = cls.get_model(model)
 
+        # Check if the model is working
+        if not cls.working or model not in cls.models:
+            raise ModelNotWorkingException(model)
+
         headers = {
             "accept": "*/*",
             "accept-language": "en-US,en;q=0.9",
@@ -117,11 +124,11 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
 
         if model in cls.userSelectedModel:
             prefix = f"@{cls.userSelectedModel[model]}"
-            if not messages[0]['content'].startswith(prefix):
+            if messages and not messages[0]['content'].startswith(prefix):
                 messages[0]['content'] = f"{prefix} {messages[0]['content']}"
 
         async with ClientSession(headers=headers) as session:
-            if image is not None:
+            if image is not None and messages:
                 messages[-1]["data"] = {
                     "fileText": image_name,
                     "imageBase64": to_data_uri(image)
@@ -173,7 +180,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
             else:
                 async for chunk in response.content.iter_any():
                     if chunk:
-                        decoded_chunk = chunk.decode(errors='ignore')  # Handle decoding errors
+                        decoded_chunk = chunk.decode(errors='ignore')
                         decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
                         if decoded_chunk.strip():
                             yield decoded_chunk
@@ -188,7 +195,7 @@ class Message(BaseModel):
 class ChatRequest(BaseModel):
     model: str
     messages: List[Message]
-    stream: Optional[bool] = False  # Add this for streaming
+    stream: Optional[bool] = False
 
 def create_response(content: str, model: str, finish_reason: Optional[str] = None) -> Dict[str, Any]:
     return {
@@ -208,20 +215,26 @@ def create_response(content: str, model: str, finish_reason: Optional[str] = Non
 
 @app.post("/niansuhai/v1/chat/completions")
 async def chat_completions(request: ChatRequest):
+    valid_models = Blackbox.models + list(Blackbox.userSelectedModel.keys()) + list(Blackbox.model_aliases.keys())
+    if request.model not in valid_models:
+        raise HTTPException(status_code=400, detail=f"Invalid model name: {request.model}. Valid models are: {valid_models}")
+
     messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]
 
-    async_generator = Blackbox.create_async_generator(
-        model=request.model,
-        messages=messages,
-        image=None,  # Pass the image if required
-        image_name=None  # Pass image name if required
-    )
+    try:
+        async_generator = await Blackbox.create_async_generator(
+            model=request.model,
+            messages=messages,
+            image=None,
+            image_name=None
+        )
+    except ModelNotWorkingException as e:
+        raise HTTPException(status_code=503, detail=str(e))
 
     if request.stream:
         async def generate():
             async for chunk in async_generator:
                 if isinstance(chunk, ImageResponse):
-                    # Format the response as a Markdown image
                     image_markdown = f"![image]({chunk.url})"
                     yield f"data: {json.dumps(create_response(image_markdown, request.model))}\n\n"
                 else:
@@ -233,10 +246,9 @@ async def chat_completions(request: ChatRequest):
         response_content = ""
         async for chunk in async_generator:
             if isinstance(chunk, ImageResponse):
-                # Add Markdown image to the response
                 response_content += f"![image]({chunk.url})\n"
             else:
-                response_content += chunk  # Concatenate text responses
+                response_content += chunk
 
         return {
             "id": f"chatcmpl-{uuid.uuid4()}",
@@ -255,3 +267,7 @@ async def chat_completions(request: ChatRequest):
         ],
         "usage": None,
     }
+
+@app.get("/niansuhai/v1/models")
+async def get_models():
+    return {"models": Blackbox.models}
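For reference, a minimal client sketch against the two endpoints this commit touches. The base URL is a placeholder assumption (the diff does not show how the app is served); only the paths, the request schema, and the new 400/503 error behavior come from the diff above. The response body is printed raw rather than assuming a particular choices layout.

# Hypothetical client for the endpoints in this commit; BASE_URL is an assumption.
import requests

BASE_URL = "http://localhost:8000"  # placeholder host/port, adjust to your deployment

# New in this commit: GET /niansuhai/v1/models returns Blackbox.models.
print(requests.get(f"{BASE_URL}/niansuhai/v1/models").json())

# Non-streaming completion. Per the diff, an unknown model name now returns
# HTTP 400, and a ModelNotWorkingException surfaces as HTTP 503.
resp = requests.post(
    f"{BASE_URL}/niansuhai/v1/chat/completions",
    json={
        "model": "blackbox",
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": False,
    },
)
resp.raise_for_status()
print(resp.json())

With stream=True the endpoint instead emits "data: ..." server-sent-event lines, which a client would read incrementally rather than via resp.json().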