Niansuh committed
Commit 915b036 · verified · 1 Parent(s): 77af3d8

Update main.py

Files changed (1)
  1. main.py +43 -110
main.py CHANGED
@@ -14,28 +14,17 @@ from pydantic import BaseModel
 from starlette.middleware.cors import CORSMiddleware
 from starlette.responses import StreamingResponse, Response
 
-# ==============================
-# Configuration and Setup
-# ==============================
-
-# Configure logging
 logging.basicConfig(
-    level=logging.DEBUG,  # Set to DEBUG for detailed logs
-    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
 )
 logger = logging.getLogger(__name__)
 
-# Load environment variables from .env file
 load_dotenv()
-
-# Initialize FastAPI app
 app = FastAPI()
-
-# Constants and configurations
 BASE_URL = "https://aichatonlineorg.erweima.ai/aichatonline"
-APP_SECRET = os.getenv("APP_SECRET", "666")
-ACCESS_TOKEN = os.getenv("SD_ACCESS_TOKEN", "")
-headers = {
+APP_SECRET = os.getenv("APP_SECRET","666")
+ACCESS_TOKEN = os.getenv("SD_ACCESS_TOKEN","")
+headers = {
     'accept': '*/*',
     'accept-language': 'en-US,en;q=0.9',
     'authorization': f'Bearer {ACCESS_TOKEN}',
@@ -43,13 +32,14 @@ headers = {
     'origin': 'chrome-extension://difoiogjjojoaoomphldepapgpbgkhkb',
     'pragma': 'no-cache',
     'priority': 'u=1, i',
+    'cookie': 'lang=en; source=gg; p1=pricing; p2=search; _clck=ih7kjx%7C2%7Cfqq%7C0%7C1774; _gcl_gs=2.1.k1$i1731168118$u7016880; _ga=GA1.1.1378917294.1731168124; _gcl_au=1.1.436410932.1731168125; _fbp=fb.1.1731168125483.909164969447382788; _gcl_aw=GCL.1731168126.EAIaIQobChMI_aOK5c_PiQMVAqSDBx2OJh5bEAAYASAAEgJr9PD_BwE; __stripe_mid=b2de6d00-4ac3-4c15-9ba8-355dd63bbdf0cbeb53; _uetsid=f73638b09eb311efa0cf6b0f13512e16; _uetvid=f73635709eb311efbd8d55293947782a; token=Bearer%20eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2lkIjoxNTYzMzI1NSwicmVnaXN0ZXJfdHlwZSI6Im9hdXRoMiIsImFwcF9uYW1lIjoiQ2hpdENoYXRfV2ViIiwidG9rZW5faWQiOiI2MGRmZGMwMi1lY2QzLTQzNzktYjMzNy1kYmNlNGY5M2M1N2IiLCJpc3MiOiJzaWRlci5haSIsImF1ZCI6WyIiXSwiZXhwIjoxNzYyMjcyNTk5LCJuYmYiOjE3MzExNjg1OTksImlhdCI6MTczMTE2ODU5OX0.cT9hXiCFm-4Or2RV5Lf3IY-blu40UOvjqD01BGoDwh8; refresh_token=discard; userinfo-avatar=https://chitchat-avatar.s3.amazonaws.com/eb0e281f19c442d681cf552f3b8896dd-1731167278.png; userinfo-name=Chat%20GPT%20free; userinfo-type=oauth2; CloudFront-Policy=eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiaHR0cHM6Ly9maWxlLWNkbi5zaWRlci5haS8qL1UwMUFIRUc4NkVBLyoiLCJDb25kaXRpb24iOnsiRGF0ZUxlc3NUaGFuIjp7IkFXUzpFcG9jaFRpbWUiOjE3MzM3NjA2MDF9fX1dfQ__; CloudFront-Signature=fuXgBZW3E5TCNvvi8mb8DKpNTneaPIGunH~zxWPoVK~OaG-n6PRVz2qlxOFCY3HLTZyA72mQ4T2OtKCZGoWCTL4QL6DP5LnRw7HPt50K~D8Wte5M3GQjuSeuBe~mK44Sk~Xqn1tOa2dmOtri84vlLdo7ud4ZGuMML60AUve13l7eS8uAJO88cQ9rhdZMJ26opJhayI0MSJgCFZymzMm1iEwwSF3ufV5c2elpTuKqVTP2HqZGHzgJQvRl7U~gU4qFiH7KxCG0MF5X0HDsz3Pn0qghNle~FLAtCumI-mzoGNCXEM9~qpYAXJLO3a--ThhnbTrWsPLl2l~zCRiKQ5wB3g__; CloudFront-Key-Pair-Id=K344F5VVSSM536; _rdt_uuid=1731168120149.9c981ceb-cb94-490d-99d8-0f1530348c3c; _clsk=r6ig1v%7C1731170609000%7C8%7C1%7Cu.clarity.ms%2Fcollect; _ga_0PRFKME4HP=GS1.1.1731171204.2.1.1731171205.59.0.0',
     'sec-fetch-dest': 'empty',
     'sec-fetch-mode': 'cors',
     'sec-fetch-site': 'none',
-    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
+    'sec-fetch-site': 'none',
+    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
 }
 
-# Define allowed models
 ALLOWED_MODELS = [
     {"id": "claude-3.5-sonnet", "name": "claude-3.5-sonnet"},
     {"id": "claude-3-opus", "name": "claude-3-opus"},
@@ -59,38 +49,29 @@ ALLOWED_MODELS = [
     {"id": "o1-mini", "name": "o1-mini"},
     {"id": "gpt-4o-mini", "name": "gpt-4o-mini"},
 ]
-
-# Configure CORS middleware
+# Configure CORS
 app.add_middleware(
     CORSMiddleware,
-    allow_origins=["*"],  # Allow all origins; restrict if necessary
+    allow_origins=["*"],  # Allow all sources, you can restrict specific sources if needed
     allow_credentials=True,
-    allow_methods=["*"],  # Allow all HTTP methods
+    allow_methods=["*"],  # All methods allowed
     allow_headers=["*"],  # Allow all headers
 )
-
-# Security configuration
 security = HTTPBearer()
 
-# ==============================
-# Pydantic Models
-# ==============================
 
 class Message(BaseModel):
     role: str
     content: str
 
+
 class ChatRequest(BaseModel):
     model: str
     messages: List[Message]
     stream: Optional[bool] = False
 
-# ==============================
-# Helper Functions
-# ==============================
 
-def simulate_data(content: str, model: str) -> Dict[str, Any]:
-    """Simulate chunked response data."""
+def simulate_data(content, model):
     return {
         "id": f"chatcmpl-{uuid.uuid4()}",
         "object": "chat.completion.chunk",
@@ -106,8 +87,8 @@ def simulate_data(content: str, model: str) -> Dict[str, Any]:
         "usage": None,
     }
 
-def stop_data(content: str, model: str) -> Dict[str, Any]:
-    """Simulate the final chunk indicating the end of the response."""
+
+def stop_data(content, model):
     return {
         "id": f"chatcmpl-{uuid.uuid4()}",
         "object": "chat.completion.chunk",
@@ -122,9 +103,9 @@ def stop_data(content: str, model: str) -> Dict[str, Any]:
         ],
         "usage": None,
     }
-
+
+
 def create_chat_completion_data(content: str, model: str, finish_reason: Optional[str] = None) -> Dict[str, Any]:
-    """Create a structured chat completion data chunk."""
     return {
         "id": f"chatcmpl-{uuid.uuid4()}",
         "object": "chat.completion.chunk",
@@ -140,23 +121,15 @@ def create_chat_completion_data(content: str, model: str, finish_reason: Optiona
         "usage": None,
     }
 
-def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(security)) -> str:
-    """Verify the provided APP_SECRET."""
+
+def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(security)):
    if credentials.credentials != APP_SECRET:
        raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
    return credentials.credentials
 
-def replace_escaped_newlines(input_string: str) -> str:
-    """Replace escaped newline characters with actual newlines."""
-    return input_string.replace("\\n", "\n")
-
-# ==============================
-# API Endpoints
-# ==============================
 
 @app.options("/hf/v1/chat/completions")
 async def chat_completions_options():
-    """Handle CORS preflight requests."""
     return Response(
         status_code=200,
         headers={
@@ -166,31 +139,32 @@ async def chat_completions_options():
         },
     )
 
+
+def replace_escaped_newlines(input_string: str) -> str:
+    return input_string.replace("\\n", "\n")
+
+
 @app.get("/hf/v1/models")
 async def list_models():
-    """List all allowed models."""
     return {"object": "list", "data": ALLOWED_MODELS}
 
+
 @app.post("/hf/v1/chat/completions")
 async def chat_completions(
     request: ChatRequest, app_secret: str = Depends(verify_app_secret)
 ):
-    """Handle chat completion requests."""
     logger.info(f"Received chat completion request for model: {request.model}")
 
-    # Validate model
-    allowed_model_ids = [model['id'] for model in ALLOWED_MODELS]
-    if request.model not in allowed_model_ids:
+    if request.model not in [model['id'] for model in ALLOWED_MODELS]:
        raise HTTPException(
            status_code=400,
-            detail=f"Model {request.model} is not allowed. Allowed models are: {', '.join(allowed_model_ids)}",
+            detail=f"Model {request.model} is not allowed. Allowed models are: {', '.join(model['id'] for model in ALLOWED_MODELS)}",
        )
-
     # Generate a UUID
     original_uuid = uuid.uuid4()
     uuid_str = str(original_uuid).replace("-", "")
 
-    # Construct the payload to send to the external API
+    # Using the OpenAI API
     json_data = {
         'prompt': "\n".join(
             [
@@ -200,12 +174,12 @@ async def chat_completions(
         ),
         'stream': True,
         'app_name': 'ChitChat_Edge_Ext',
-        'app_version': '4.28.0',
+        'app_version': '4.26.1',
         'tz_name': 'Asia/Karachi',
-        'cid': 'C092SEMXM9BJ',
+        'cid': '',
         'model': request.model,
-        'search': False,  # Disable search
-        'auto_search': False,  # Disable auto_search
+        'search': False,
+        'auto_search': False,
         'filter_search_history': False,
         'from': 'chat',
         'group_id': 'default',
@@ -219,7 +193,7 @@
         },
         'tools': {
             'auto': [
-                'search',  # Re-add search to maintain API expectations
+                'search',
                 'text_to_image',
                 'data_analysis',
             ],
@@ -230,53 +204,17 @@
         },
     }
 
-    # Define the asynchronous generator for streaming responses
     async def generate():
         async with httpx.AsyncClient() as client:
             try:
-                async with client.stream('POST', 'https://sider.ai/api/v3/completion/text', headers=headers, json=json_data, timeout=120.0) as response:
+                async with client.stream('POST', 'https://sider.ai/api/v2/completion/text', headers=headers, json=json_data, timeout=120.0) as response:
                     response.raise_for_status()
                     async for line in response.aiter_lines():
-                        if line:
-                            logger.debug(f"Raw line received: {line}")  # Log raw line
                        if line and ("[DONE]" not in line):
-                            try:
-                                # Remove 'data: ' prefix if present
-                                if line.startswith("data: "):
-                                    line_content = line[6:]
-                                else:
-                                    line_content = line
-
-                                # Log the content before parsing
-                                logger.debug(f"Line content to parse: {line_content}")
-
-                                # Check if the line is not empty after stripping
-                                if not line_content.strip():
-                                    logger.warning("Received an empty line, skipping.")
-                                    continue
-
-                                # Attempt to parse JSON
-                                parsed_json = json.loads(line_content)
-
-                                # Ensure 'data' key exists
-                                if "data" not in parsed_json:
-                                    logger.error(f"'data' key not found in the response: {parsed_json}")
-                                    continue
-
-                                content_data = parsed_json["data"]
-
-                                # Extract text content if available
-                                text_content = content_data.get("text", "")
-
-                                # Yield the formatted data
-                                yield f"data: {json.dumps(create_chat_completion_data(text_content, request.model))}\n\n"
-                            except json.JSONDecodeError as e:
-                                logger.error(f"JSON decode error: {e} | Line: {line_content}")
-                                continue
-                        else:
-                            if line and "[DONE]" in line:
-                                yield f"data: {json.dumps(create_chat_completion_data('', request.model, 'stop'))}\n\n"
-                                yield "data: [DONE]\n\n"
+                            content = json.loads(line[5:])["data"]
+                            yield f"data: {json.dumps(create_chat_completion_data(content.get('text',''), request.model))}\n\n"
+                    yield f"data: {json.dumps(create_chat_completion_data('', request.model, 'stop'))}\n\n"
+                    yield "data: [DONE]\n\n"
            except httpx.HTTPStatusError as e:
                logger.error(f"HTTP error occurred: {e}")
                raise HTTPException(status_code=e.response.status_code, detail=str(e))
@@ -292,14 +230,11 @@ async def chat_completions(
         full_response = ""
         async for chunk in generate():
             if chunk.startswith("data: ") and not chunk[6:].startswith("[DONE]"):
-                try:
-                    data = json.loads(chunk[6:])
-                    if data["choices"][0]["delta"].get("content"):
-                        full_response += data["choices"][0]["delta"]["content"]
-                except json.JSONDecodeError as e:
-                    logger.error(f"JSON decode error in non-streaming response: {e}")
-                    continue
-
+                # print(chunk)
+                data = json.loads(chunk[6:])
+                if data["choices"][0]["delta"].get("content"):
+                    full_response += data["choices"][0]["delta"]["content"]
+
        return {
            "id": f"chatcmpl-{uuid.uuid4()}",
            "object": "chat.completion",
@@ -315,9 +250,7 @@ async def chat_completions(
         "usage": None,
     }
 
-# ==============================
-# Entry Point
-# ==============================
+
 
 if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
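
For a quick smoke test of the endpoint this file exposes, a minimal client sketch. Assumptions (not part of the commit): the app is running locally on port 7860 as in the __main__ block, and APP_SECRET is left at its default of "666"; adjust both to your deployment.

import httpx  # already a dependency of main.py

SERVER_URL = "http://localhost:7860"   # assumed local deployment
APP_SECRET = "666"                     # must match the server's APP_SECRET

payload = {
    "model": "gpt-4o-mini",            # any id from ALLOWED_MODELS
    "messages": [{"role": "user", "content": "Hello"}],
    "stream": False,                   # set True to consume the SSE stream instead
}

# The endpoint is protected by HTTPBearer, so the secret goes in the Authorization header.
resp = httpx.post(
    f"{SERVER_URL}/hf/v1/chat/completions",
    headers={"Authorization": f"Bearer {APP_SECRET}"},
    json=payload,
    timeout=120.0,
)
resp.raise_for_status()
print(resp.json())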