isididiidid committed on
Commit
06e6d5a
·
verified ·
1 Parent(s): faefd73

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +85 -121
app.py CHANGED
@@ -1,4 +1,11 @@
1
- import json
 
 
 
 
 
 
 
2
  import time
3
  import asyncio
4
  import uvicorn
@@ -30,6 +37,10 @@ app = FastAPI(
30
  version="1.0.0"
31
  )
32
 
 
 
 
 
33
  # 添加CORS中间件
34
  app.add_middleware(
35
  CORSMiddleware,
@@ -40,7 +51,7 @@ app.add_middleware(
40
  )
41
 
42
  # 配置
43
- DEEPSIDER_API_BASE = "https://api.chargpt.ai/api/v2"
44
  TOKEN_INDEX = 0
45
 
46
  # 模型映射表
@@ -56,7 +67,6 @@ MODEL_MAPPING = {
56
  "claude-3.7-sonnet": "anthropic/claude-3.7-sonnet",
57
  }
58
 
59
- # 请求头
60
  def get_headers(api_key):
61
  global TOKEN_INDEX
62
  # 检查是否包含多个token(用逗号分隔)
@@ -70,21 +80,11 @@ def get_headers(api_key):
70
  current_token = api_key
71
 
72
  return {
73
- "accept": "*/*",
74
- "accept-encoding": "gzip, deflate, br, zstd",
75
- "accept-language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
76
  "content-type": "application/json",
77
- "origin": "chrome-extension://client",
78
- "i-lang": "zh-CN",
79
- "i-version": "1.1.64",
80
- "sec-ch-ua": '"Chromium";v="134", "Not:A-Brand";v="24"',
81
- "sec-ch-ua-mobile": "?0",
82
- "sec-ch-ua-platform": "Windows",
83
- "sec-fetch-dest": "empty",
84
- "sec-fetch-mode": "cors",
85
- "sec-fetch-site": "cross-site",
86
  "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
87
- "authorization": f"Bearer {current_token.strip()}"
 
88
  }
89
 
90
  # OpenAI API请求模型
@@ -169,26 +169,15 @@ def map_openai_to_deepsider_model(model: str) -> str:
169
 
170
  def format_messages_for_deepsider(messages: List[ChatMessage]) -> str:
171
  """格式化消息列表为DeepSider API所需的提示格式"""
172
- prompt = ""
 
173
  for msg in messages:
174
- role = msg.role
175
- # 将OpenAI的角色映射到DeepSider能理解的格式
176
- if role == "system":
177
- # 系统消息放在开头 作为指导
178
- prompt = f"{msg.content}\n\n" + prompt
179
- elif role == "user":
180
- prompt += f"Human: {msg.content}\n\n"
181
- elif role == "assistant":
182
- prompt += f"Assistant: {msg.content}\n\n"
183
  else:
184
- # 其他角色按用户处理
185
- prompt += f"Human ({role}): {msg.content}\n\n"
186
-
187
- # 如果最后一个消息不是用户的 添加一个Human前缀引导模型回答
188
- if messages and messages[-1].role != "user":
189
- prompt += "Human: "
190
 
191
- return prompt.strip()
192
 
193
  async def generate_openai_response(full_response: str, request_id: str, model: str) -> Dict:
194
  """生成符合OpenAI API响应格式的完整响应"""
@@ -218,72 +207,16 @@ async def generate_openai_response(full_response: str, request_id: str, model: s
218
  async def stream_openai_response(response, request_id: str, model: str, api_key, token_index):
219
  """流式返回OpenAI API格式的响应"""
220
  timestamp = int(time.time())
221
- full_response = ""
222
 
223
  try:
224
- # 将DeepSider响应流转换为OpenAI流格式
225
- for line in response.iter_lines():
226
- if not line:
227
- continue
228
 
229
- if line.startswith(b'data: '):
230
- try:
231
- data = json.loads(line[6:].decode('utf-8'))
232
-
233
- if data.get('code') == 202 and data.get('data', {}).get('type') == "chat":
234
- # 获取正文内容
235
- content = data.get('data', {}).get('content', '')
236
- if content:
237
- full_response += content
238
-
239
- # 生成OpenAI格式的流式响应
240
- chunk = {
241
- "id": f"chatcmpl-{request_id}",
242
- "object": "chat.completion.chunk",
243
- "created": timestamp,
244
- "model": model,
245
- "choices": [
246
- {
247
- "index": 0,
248
- "delta": {
249
- "content": content
250
- },
251
- "finish_reason": None
252
- }
253
- ]
254
- }
255
- yield f"data: {json.dumps(chunk)}\n\n"
256
-
257
- elif data.get('code') == 203:
258
- # 生成完成信号
259
- chunk = {
260
- "id": f"chatcmpl-{request_id}",
261
- "object": "chat.completion.chunk",
262
- "created": timestamp,
263
- "model": model,
264
- "choices": [
265
- {
266
- "index": 0,
267
- "delta": {},
268
- "finish_reason": "stop"
269
- }
270
- ]
271
- }
272
- yield f"data: {json.dumps(chunk)}\n\n"
273
- yield "data: [DONE]\n\n"
274
-
275
- except json.JSONDecodeError:
276
- logger.warning(f"无法解析响应: {line}")
277
-
278
  except Exception as e:
279
  logger.error(f"流式响应处理出错: {str(e)}")
280
 
281
- # 尝试使用下一个Token
282
- tokens = api_key.split(',')
283
- if len(tokens) > 1:
284
- logger.info(f"尝试使用下一个Token重试请求")
285
- # 目前我们不在这里实现自动重试,只记录错误
286
-
287
  # 返回错误信息
288
  error_chunk = {
289
  "id": f"chatcmpl-{request_id}",
@@ -306,7 +239,16 @@ async def stream_openai_response(response, request_id: str, model: str, api_key,
306
  # 路由定义
307
  @app.get("/")
308
  async def root():
309
- return {"message": "OpenAI API Proxy服务已启动 连接至DeepSider API"}
 
 
 
 
 
 
 
 
 
310
 
311
  @app.get("/v1/models")
312
  async def list_models(api_key: str = Depends(verify_api_key)):
@@ -348,8 +290,7 @@ async def create_chat_completion(
348
  payload = {
349
  "model": deepsider_model,
350
  "prompt": prompt,
351
- "webAccess": "close", # 默认关闭网络访问
352
- "timezone": "Asia/Shanghai"
353
  }
354
 
355
  # 获取请求头(包含选择的token)
@@ -359,12 +300,17 @@ async def create_chat_completion(
359
  current_token_index = (TOKEN_INDEX - 1) % len(tokens) if len(tokens) > 0 else 0
360
 
361
  try:
 
 
 
 
362
  # 发送请求到DeepSider API
363
  response = requests.post(
364
- f"{DEEPSIDER_API_BASE}/chat/conversation",
365
  headers=headers,
366
  json=payload,
367
- stream=True
 
368
  )
369
 
370
  # 检查响应状态
@@ -374,10 +320,19 @@ async def create_chat_completion(
374
  error_data = response.json()
375
  error_msg += f" - {error_data.get('message', '')}"
376
  except:
377
- error_msg += f" - {response.text}"
 
 
 
378
 
379
  logger.error(error_msg)
380
- raise HTTPException(status_code=response.status_code, detail="API请求失败")
 
 
 
 
 
 
381
 
382
  # 处理流式或非流式响应
383
  if chat_request.stream:
@@ -387,26 +342,22 @@ async def create_chat_completion(
387
  media_type="text/event-stream"
388
  )
389
  else:
390
- # 收集完整响应
391
- full_response = ""
392
- for line in response.iter_lines():
393
- if not line:
394
- continue
395
-
396
- if line.startswith(b'data: '):
397
- try:
398
- data = json.loads(line[6:].decode('utf-8'))
399
-
400
- if data.get('code') == 202 and data.get('data', {}).get('type') == "chat":
401
- content = data.get('data', {}).get('content', '')
402
- if content:
403
- full_response += content
404
-
405
- except json.JSONDecodeError:
406
- pass
407
-
408
- # 返回OpenAI格式的完整响应
409
- return await generate_openai_response(full_response, request_id, chat_request.model)
410
 
411
  except HTTPException:
412
  raise
@@ -459,7 +410,17 @@ async def not_found_handler(request, exc):
459
  "type": "not_found_error",
460
  "code": "not_found"
461
  }
462
- }, 404
 
 
 
 
 
 
 
 
 
 
463
 
464
  # 启动事件
465
  @app.on_event("startup")
@@ -467,6 +428,9 @@ async def startup_event():
467
  """服务启动时初始化"""
468
  logger.info(f"OpenAI API代理服务已启动,可以接受请求")
469
  logger.info(f"支持多token轮询,请在Authorization头中使用英文逗号分隔多个token")
 
 
 
470
 
471
  # 主程序
472
  if __name__ == "__main__":
 
1
+ # 启动事件
2
+ @app.on_event("startup")
3
+ async def startup_event():
4
+ """服务启动时初始化"""
5
+ logger.info(f"OpenAI API代理服务已启动,可以接受请求")
6
+ logger.info(f"支持多token轮询,请在Authorization头中使用英文逗号分隔多个token")
7
+ logger.info(f"服务地址: http://127.0.0.1:7860")
8
+ logger.info(f"OpenAI API格式请求示例: POST http://127.0.0.1:7860/v1/chat/completions")
+ import json
9
  import time
10
  import asyncio
11
  import uvicorn
 
37
  version="1.0.0"
38
  )
39
 
40
+ # 增加日志输出级别
41
+ if os.getenv("DEBUG", "false").lower() == "true":
42
+ logging.getLogger("openai-proxy").setLevel(logging.DEBUG)
43
+
44
  # 添加CORS中间件
45
  app.add_middleware(
46
  CORSMiddleware,
 
51
  )
52
 
53
  # 配置
54
+ DEEPSIDER_API_BASE = "https://api.chargpt.ai/api/v1"
55
  TOKEN_INDEX = 0
56
 
57
  # 模型映射表
 
67
  "claude-3.7-sonnet": "anthropic/claude-3.7-sonnet",
68
  }
69
 
 
70
  def get_headers(api_key):
71
  global TOKEN_INDEX
72
  # 检查是否包含多个token(用逗号分隔)
 
80
  current_token = api_key
81
 
82
  return {
83
+ "accept": "application/json",
 
 
84
  "content-type": "application/json",
 
 
 
 
 
 
 
 
 
85
  "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
86
+ "authorization": f"Bearer {current_token.strip()}",
87
+ "i-version": "1.1.64"
88
  }
89
 
90
  # OpenAI API请求模型
 
169
 
170
  def format_messages_for_deepsider(messages: List[ChatMessage]) -> str:
171
  """格式化消息列表为DeepSider API所需的提示格式"""
172
+ # 直接合并所有消息内容,无需特殊格式化
173
+ combined_prompt = ""
174
  for msg in messages:
175
+ if msg.role == "system":
176
+ combined_prompt = msg.content + "\n\n" + combined_prompt
 
 
 
 
 
 
 
177
  else:
178
+ combined_prompt += msg.content + "\n\n"
 
 
 
 
 
179
 
180
+ return combined_prompt.strip()
181
 
182
  async def generate_openai_response(full_response: str, request_id: str, model: str) -> Dict:
183
  """生成符合OpenAI API响应格式的完整响应"""
 
207
  async def stream_openai_response(response, request_id: str, model: str, api_key, token_index):
208
  """流式返回OpenAI API格式的响应"""
209
  timestamp = int(time.time())
 
210
 
211
  try:
212
+ # 直接传递原始响应
213
+ for chunk in response.iter_lines():
214
+ if chunk:
215
+ yield chunk.decode('utf-8') + "\n"
216
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
217
  except Exception as e:
218
  logger.error(f"流式响应处理出错: {str(e)}")
219
 
 
 
 
 
 
 
220
  # 返回错误信息
221
  error_chunk = {
222
  "id": f"chatcmpl-{request_id}",
 
239
  # 路由定义
240
  @app.get("/")
241
  async def root():
242
+ """返回简单的HTML页面,展示使用说明"""
243
+ return {
244
+ "message": "OpenAI API Proxy服务已启动 连接至DeepSider API",
245
+ "usage": {
246
+ "模型列表": "GET /v1/models",
247
+ "聊天完成": "POST /v1/chat/completions",
248
+ "账户余额": "GET /admin/balance (需要X-Admin-Key头)"
249
+ },
250
+ "说明": "请在Authorization头中使用Bearer token格式,支持使用英文逗号分隔多个token实现轮询"
251
+ }
252
 
253
  @app.get("/v1/models")
254
  async def list_models(api_key: str = Depends(verify_api_key)):
 
290
  payload = {
291
  "model": deepsider_model,
292
  "prompt": prompt,
293
+ "stream": chat_request.stream
 
294
  }
295
 
296
  # 获取请求头(包含选择的token)
 
300
  current_token_index = (TOKEN_INDEX - 1) % len(tokens) if len(tokens) > 0 else 0
301
 
302
  try:
303
+ # 记录请求信息
304
+ logger.info(f"发送请求到DeepSider API - 模型: {deepsider_model}, Prompt长度: {len(prompt)}")
305
+ logger.debug(f"请求正文: {json.dumps(payload)}")
306
+
307
  # 发送请求到DeepSider API
308
  response = requests.post(
309
+ f"{DEEPSIDER_API_BASE}/chat/completions",
310
  headers=headers,
311
  json=payload,
312
+ stream=chat_request.stream,
313
+ timeout=60 # 设置60秒超时
314
  )
315
 
316
  # 检查响应状态
 
320
  error_data = response.json()
321
  error_msg += f" - {error_data.get('message', '')}"
322
  except:
323
+ try:
324
+ error_msg += f" - {response.text[:200]}"
325
+ except:
326
+ pass
327
 
328
  logger.error(error_msg)
329
+ return {
330
+ "error": {
331
+ "message": error_msg,
332
+ "type": "api_error",
333
+ "code": response.status_code
334
+ }
335
+ }
336
 
337
  # 处理流式或非流式响应
338
  if chat_request.stream:
 
342
  media_type="text/event-stream"
343
  )
344
  else:
345
+ try:
346
+ # 非流式请求,直接返回响应
347
+ json_response = response.json()
348
+ # 记录响应,有助于调试
349
+ logger.debug(f"非流式响应: {json.dumps(json_response)}")
350
+ return json_response
351
+
352
+ except Exception as e:
353
+ logger.exception(f"非流式响应处理出错: {str(e)}")
354
+ return {
355
+ "error": {
356
+ "message": f"处理响应时出错: {str(e)}",
357
+ "type": "processing_error",
358
+ "code": "internal_error"
359
+ }
360
+ }
 
 
 
 
361
 
362
  except HTTPException:
363
  raise
 
410
  "type": "not_found_error",
411
  "code": "not_found"
412
  }
413
+ }
414
+
415
+ @app.exception_handler(500)
416
+ async def server_error_handler(request, exc):
417
+ return {
418
+ "error": {
419
+ "message": f"服务器内部错误: {str(exc)}",
420
+ "type": "server_error",
421
+ "code": "internal_server_error"
422
+ }
423
+ }
424
 
425
  # 启动事件
426
  @app.on_event("startup")
 
428
  """服务启动时初始化"""
429
  logger.info(f"OpenAI API代理服务已启动,可以接受请求")
430
  logger.info(f"支持多token轮询,请在Authorization头中使用英文逗号分隔多个token")
431
+ logger.info(f"服务地址: http://127.0.0.1:7860")
432
+ logger.info(f"OpenAI API格式请求示例: POST http://127.0.0.1:7860/v1/chat/completions")
433
+ logger.info(f"可用模型查询: GET http://127.0.0.1:7860/v1/models")
434
 
435
  # 主程序
436
  if __name__ == "__main__":