chb2026 committed
Commit 1222cee · verified · 1 Parent(s): 31d121c

Create app.py

Files changed (1):
  app.py (+535, -0)
app.py ADDED
@@ -0,0 +1,535 @@
+ import json
+ import time
+ import asyncio
+ import uvicorn
+ from fastapi import FastAPI, Request, HTTPException, Header, Depends
+ from fastapi.responses import StreamingResponse, JSONResponse
+ from fastapi.middleware.cors import CORSMiddleware
+ from pydantic import BaseModel, Field
+ from typing import List, Optional, Dict, Any, Union
+ import requests
+ from datetime import datetime
+ import logging
+ import os
+ from dotenv import load_dotenv
+
+ # Load environment variables
+ load_dotenv()
+
+ # Configure logging
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+ )
+ logger = logging.getLogger("openai-proxy")
+
+ # Create the FastAPI application
+ app = FastAPI(
+     title="OpenAI API Proxy",
+     description="Proxies OpenAI API requests to the DeepSider API",
+     version="1.0.0"
+ )
+
+ # Add CORS middleware
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ # Configuration
+ DEEPSIDER_API_BASE = "https://api.chargpt.ai/api/v2"
+ DEEPSIDER_TOKEN = os.getenv("DEEPSIDER_TOKEN", "").split(',')
+ TOKEN_INDEX = 0
+
+ # Model mapping table
+ MODEL_MAPPING = {
+     "gpt-3.5-turbo": "anthropic/claude-3.5-sonnet",
+     "gpt-4": "anthropic/claude-3.7-sonnet",
+     "gpt-4o": "openai/gpt-4o",
+     "gpt-4-turbo": "openai/gpt-4o",
+     "gpt-4o-mini": "openai/gpt-4o-mini",
+     "claude-3-sonnet-20240229": "anthropic/claude-3.5-sonnet",
+     "claude-3-opus-20240229": "anthropic/claude-3.7-sonnet",
+     "claude-3.5-sonnet": "anthropic/claude-3.5-sonnet",
+     "claude-3.7-sonnet": "anthropic/claude-3.7-sonnet",
+ }
+
+ # Token load-balancing state
+ token_status = {}
+
+ # Request headers
+ def get_headers():
+     global TOKEN_INDEX
+     # Load balancing: select a token round-robin
+     if len(DEEPSIDER_TOKEN) > 0:
+         current_token = DEEPSIDER_TOKEN[TOKEN_INDEX % len(DEEPSIDER_TOKEN)]
+         TOKEN_INDEX = (TOKEN_INDEX + 1) % len(DEEPSIDER_TOKEN)
+
+         # Check the token's status
+         if current_token in token_status and not token_status[current_token]["active"]:
+             # If the token is unavailable, try the next one
+             for i in range(len(DEEPSIDER_TOKEN)):
+                 next_token = DEEPSIDER_TOKEN[(TOKEN_INDEX + i) % len(DEEPSIDER_TOKEN)]
+                 if next_token not in token_status or token_status[next_token]["active"]:
+                     current_token = next_token
+                     TOKEN_INDEX = (TOKEN_INDEX + i + 1) % len(DEEPSIDER_TOKEN)
+                     break
+     else:
+         current_token = ""
+
+     return {
+         "accept": "*/*",
+         "accept-encoding": "gzip, deflate, br, zstd",
+         "accept-language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
+         "content-type": "application/json",
+         "origin": "chrome-extension://client",
+         "i-lang": "zh-CN",
+         "i-version": "1.1.64",
+         "sec-ch-ua": '"Chromium";v="134", "Not:A-Brand";v="24"',
+         "sec-ch-ua-mobile": "?0",
+         "sec-ch-ua-platform": "Windows",
+         "sec-fetch-dest": "empty",
+         "sec-fetch-mode": "cors",
+         "sec-fetch-site": "cross-site",
+         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
+         "authorization": f"Bearer {current_token}"
+     }
+
+ # OpenAI API request models
+ class ChatMessage(BaseModel):
+     role: str
+     content: str
+     name: Optional[str] = None
+
+ class ChatCompletionRequest(BaseModel):
+     model: str
+     messages: List[ChatMessage]
+     temperature: Optional[float] = 1.0
+     top_p: Optional[float] = 1.0
+     n: Optional[int] = 1
+     stream: Optional[bool] = False
+     stop: Optional[Union[List[str], str]] = None
+     max_tokens: Optional[int] = None
+     presence_penalty: Optional[float] = 0
+     frequency_penalty: Optional[float] = 0
+     user: Optional[str] = None
+
+ # Initialize token status
+ async def initialize_token_status():
+     """Check the status and quota of every token at startup"""
+     global token_status
+
+     for token in DEEPSIDER_TOKEN:
+         headers = {
+             "accept": "*/*",
+             "content-type": "application/json",
+             "authorization": f"Bearer {token}"
+         }
+
+         try:
+             # Fetch the account quota information
+             response = requests.get(
+                 f"{DEEPSIDER_API_BASE.replace('/v2', '')}/quota/retrieve",
+                 headers=headers
+             )
+
+             active = False
+             quota_info = {}
+
+             if response.status_code == 200:
+                 data = response.json()
+                 if data.get('code') == 0:
+                     quota_list = data.get('data', {}).get('list', [])
+
+                     # Parse the quota information
+                     for item in quota_list:
+                         item_type = item.get('type', '')
+                         available = item.get('available', 0)
+
+                         if available > 0:
+                             active = True
+
+                         quota_info[item_type] = {
+                             "total": item.get('total', 0),
+                             "available": available,
+                             "title": item.get('title', '')
+                         }
+
+             token_status[token] = {
+                 "active": active,
+                 "quota": quota_info,
+                 "last_checked": datetime.now(),
+                 "failed_count": 0
+             }
+
+             logger.info(f"Token {token[:8]}... status: {'active' if active else 'invalid'}")
+
+         except Exception as e:
+             logger.warning(f"Error checking token {token[:8]}...: {str(e)}")
+             token_status[token] = {
+                 "active": False,
+                 "quota": {},
+                 "last_checked": datetime.now(),
+                 "failed_count": 0
+             }
+
+ # Utility functions
+ def verify_api_key(api_key: str = Header(..., alias="Authorization")):
+     """Validate the API key"""
+     if not api_key.startswith("Bearer "):
+         raise HTTPException(status_code=401, detail="Invalid API key format")
+     return api_key.replace("Bearer ", "")
+
+ def map_openai_to_deepsider_model(model: str) -> str:
+     """Map an OpenAI model name to a DeepSider model name"""
+     return MODEL_MAPPING.get(model, "anthropic/claude-3.7-sonnet")
+
+ def format_messages_for_deepsider(messages: List[ChatMessage]) -> str:
+     """Format a message list into the prompt format required by the DeepSider API"""
+     prompt = ""
+     for msg in messages:
+         role = msg.role
+         # Map OpenAI roles to a format DeepSider understands
+         if role == "system":
+             # Put system messages at the beginning as guidance
+             prompt = f"{msg.content}\n\n" + prompt
+         elif role == "user":
+             prompt += f"Human: {msg.content}\n\n"
+         elif role == "assistant":
+             prompt += f"Assistant: {msg.content}\n\n"
+         else:
+             # Treat any other role as a user message
+             prompt += f"Human ({role}): {msg.content}\n\n"
+
+     # If the last message is not from the user, add a Human prefix to prompt the model to answer
+     if messages and messages[-1].role != "user":
+         prompt += "Human: "
+
+     return prompt.strip()
+
+ def update_token_status(token: str, success: bool, error_message: str = None):
+     """Update a token's status"""
+     global token_status
+
+     if token not in token_status:
+         token_status[token] = {
+             "active": True,
+             "quota": {},
+             "last_checked": datetime.now(),
+             "failed_count": 0
+         }
+
+     if not success:
+         token_status[token]["failed_count"] += 1
+
+         # If the error message indicates insufficient quota, mark the token as inactive
+         # (the literal "配额不足" is kept because it matches the upstream API's Chinese error text)
+         if error_message and ("配额不足" in error_message or "quota" in error_message.lower()):
+             token_status[token]["active"] = False
+             logger.warning(f"Token {token[:8]}... is out of quota and has been marked inactive")
+
+         # Also mark the token inactive after 5 consecutive failures
+         if token_status[token]["failed_count"] >= 5:
+             token_status[token]["active"] = False
+             logger.warning(f"Token {token[:8]}... failed {token_status[token]['failed_count']} consecutive times and has been marked inactive")
+     else:
+         # Reset the failure counter on success
+         token_status[token]["failed_count"] = 0
+
+ async def generate_openai_response(full_response: str, request_id: str, model: str) -> Dict:
+     """Build a complete response in the OpenAI API response format"""
+     timestamp = int(time.time())
+     return {
+         "id": f"chatcmpl-{request_id}",
+         "object": "chat.completion",
+         "created": timestamp,
+         "model": model,
+         "choices": [
+             {
+                 "index": 0,
+                 "message": {
+                     "role": "assistant",
+                     "content": full_response
+                 },
+                 "finish_reason": "stop"
+             }
+         ],
+         "usage": {
+             "prompt_tokens": 0,  # cannot be computed accurately
+             "completion_tokens": 0,  # cannot be computed accurately
+             "total_tokens": 0  # cannot be computed accurately
+         }
+     }
+
+ async def stream_openai_response(response, request_id: str, model: str, token: str):
+     """Stream the response back in OpenAI API format"""
+     timestamp = int(time.time())
+     full_response = ""
+
+     try:
+         # Convert the DeepSider response stream into the OpenAI streaming format
+         for line in response.iter_lines():
+             if not line:
+                 continue
+
+             if line.startswith(b'data: '):
+                 try:
+                     data = json.loads(line[6:].decode('utf-8'))
+
+                     if data.get('code') == 202 and data.get('data', {}).get('type') == "chat":
+                         # Extract the message content
+                         content = data.get('data', {}).get('content', '')
+                         if content:
+                             full_response += content
+
+                             # Emit an OpenAI-format streaming chunk
+                             chunk = {
+                                 "id": f"chatcmpl-{request_id}",
+                                 "object": "chat.completion.chunk",
+                                 "created": timestamp,
+                                 "model": model,
+                                 "choices": [
+                                     {
+                                         "index": 0,
+                                         "delta": {
+                                             "content": content
+                                         },
+                                         "finish_reason": None
+                                     }
+                                 ]
+                             }
+                             yield f"data: {json.dumps(chunk)}\n\n"
+
+                     elif data.get('code') == 203:
+                         # Emit the completion signal
+                         chunk = {
+                             "id": f"chatcmpl-{request_id}",
+                             "object": "chat.completion.chunk",
+                             "created": timestamp,
+                             "model": model,
+                             "choices": [
+                                 {
+                                     "index": 0,
+                                     "delta": {},
+                                     "finish_reason": "stop"
+                                 }
+                             ]
+                         }
+                         yield f"data: {json.dumps(chunk)}\n\n"
+                         yield "data: [DONE]\n\n"
+
+                 except json.JSONDecodeError:
+                     logger.warning(f"Failed to parse response line: {line}")
+
+         # Mark the token as successful
+         update_token_status(token, True)
+
+     except Exception as e:
+         logger.error(f"Error while processing the streamed response: {str(e)}")
+         # Mark the token as failed
+         update_token_status(token, False, str(e))
+
+         # Return an error message to the client
+         error_chunk = {
+             "id": f"chatcmpl-{request_id}",
+             "object": "chat.completion.chunk",
+             "created": timestamp,
+             "model": model,
+             "choices": [
+                 {
+                     "index": 0,
+                     "delta": {
+                         "content": f"\n\n[Error while processing the response: {str(e)}]"
+                     },
+                     "finish_reason": "stop"
+                 }
+             ]
+         }
+         yield f"data: {json.dumps(error_chunk)}\n\n"
+         yield "data: [DONE]\n\n"
+
+ # Route definitions
+ @app.get("/")
+ async def root():
+     return {"message": "OpenAI API Proxy is running and connected to the DeepSider API"}
+
+ @app.get("/v1/models")
+ async def list_models(api_key: str = Depends(verify_api_key)):
+     """List the available models"""
+     models = []
+     for openai_model, _ in MODEL_MAPPING.items():
+         models.append({
+             "id": openai_model,
+             "object": "model",
+             "created": int(time.time()),
+             "owned_by": "openai-proxy"
+         })
+
+     return {
+         "object": "list",
+         "data": models
+     }
+
+ @app.post("/v1/chat/completions")
+ async def create_chat_completion(
+     request: Request,
+     api_key: str = Depends(verify_api_key)
+ ):
+     """Chat completions API - supports both regular and streaming requests"""
+     # Parse the request body
+     body = await request.json()
+     chat_request = ChatCompletionRequest(**body)
+
+     # Generate a unique request ID
+     request_id = datetime.now().strftime("%Y%m%d%H%M%S") + str(time.time_ns())[-6:]
+
+     # Map the model
+     deepsider_model = map_openai_to_deepsider_model(chat_request.model)
+
+     # Build the prompt required by the DeepSider API
+     prompt = format_messages_for_deepsider(chat_request.messages)
+
+     # Build the request payload
+     payload = {
+         "model": deepsider_model,
+         "prompt": prompt,
+         "webAccess": "close",  # web access disabled by default
+         "timezone": "Asia/Shanghai"
+     }
+
+     # Get the current token
+     headers = get_headers()
+     current_token = headers["authorization"].replace("Bearer ", "")
+
+     try:
+         # Send the request to the DeepSider API
+         response = requests.post(
+             f"{DEEPSIDER_API_BASE}/chat/conversation",
+             headers=headers,
+             json=payload,
+             stream=True
+         )
+
+         # Check the response status
+         if response.status_code != 200:
+             error_msg = f"DeepSider API request failed: {response.status_code}"
+             try:
+                 error_data = response.json()
+                 error_msg += f" - {error_data.get('message', '')}"
+             except Exception:
+                 error_msg += f" - {response.text}"
+
+             logger.error(error_msg)
+
+             # Update the token status
+             update_token_status(current_token, False, error_msg)
+
+             raise HTTPException(status_code=response.status_code, detail="API request failed")
+
+         # Handle streaming or non-streaming responses
+         if chat_request.stream:
+             # Return a streaming response
+             return StreamingResponse(
+                 stream_openai_response(response, request_id, chat_request.model, current_token),
+                 media_type="text/event-stream"
+             )
+         else:
+             # Collect the full response
+             full_response = ""
+             for line in response.iter_lines():
+                 if not line:
+                     continue
+
+                 if line.startswith(b'data: '):
+                     try:
+                         data = json.loads(line[6:].decode('utf-8'))
+
+                         if data.get('code') == 202 and data.get('data', {}).get('type') == "chat":
+                             content = data.get('data', {}).get('content', '')
+                             if content:
+                                 full_response += content
+
+                     except json.JSONDecodeError:
+                         pass
+
+             # Mark the token as successful
+             update_token_status(current_token, True)
+
+             # Return the full response in OpenAI format
+             return await generate_openai_response(full_response, request_id, chat_request.model)
+
+     except HTTPException:
+         raise
+     except Exception as e:
+         logger.exception("Error while handling the request")
+         # Mark the token as failed
+         update_token_status(current_token, False, str(e))
+         raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
+
+ # Endpoint to inspect token status
+ @app.get("/admin/tokens")
+ async def get_token_status(admin_key: str = Header(None, alias="X-Admin-Key")):
+     """View the status of all tokens"""
+     # Simple admin key check
+     expected_admin_key = os.getenv("ADMIN_KEY", "admin")
+     if not admin_key or admin_key != expected_admin_key:
+         raise HTTPException(status_code=403, detail="Unauthorized")
+
+     # Mask tokens, showing only the first 8 characters
+     safe_status = {}
+     for token, status in token_status.items():
+         token_display = token[:8] + "..." if len(token) > 8 else token
+         safe_status[token_display] = status
+
+     return {"tokens": safe_status, "active_tokens": sum(1 for s in token_status.values() if s["active"])}
+
+ # Manually refresh token status
+ @app.post("/admin/refresh-tokens")
+ async def refresh_token_status(admin_key: str = Header(None, alias="X-Admin-Key")):
+     """Manually refresh the status of all tokens"""
+     # Simple admin key check
+     expected_admin_key = os.getenv("ADMIN_KEY", "admin")
+     if not admin_key or admin_key != expected_admin_key:
+         raise HTTPException(status_code=403, detail="Unauthorized")
+
+     await initialize_token_status()
+     return {"message": "All token statuses refreshed", "active_tokens": sum(1 for s in token_status.values() if s["active"])}
+
+ # Compatibility routes for the legacy engines API
+ @app.get("/v1/engines")
+ @app.get("/v1/engines/{engine_id}")
+ async def engines_handler(engine_id: Optional[str] = None):
+     """Compatibility stub for the legacy engines API"""
+     raise HTTPException(status_code=404, detail="The engines API is deprecated; use the models API instead")
+
+ # Error handler
+ @app.exception_handler(404)
+ async def not_found_handler(request, exc):
+     return JSONResponse(status_code=404, content={
+         "error": {
+             "message": f"Resource not found: {request.url.path}",
+             "type": "not_found_error",
+             "code": "not_found"
+         }
+     })
+
+ # Startup event
+ @app.on_event("startup")
+ async def startup_event():
+     """Initialize token status when the service starts"""
+     if not DEEPSIDER_TOKEN or (len(DEEPSIDER_TOKEN) == 1 and DEEPSIDER_TOKEN[0] == ""):
+         logger.warning("The DEEPSIDER_TOKEN environment variable is not set; set it and restart the service")
+     else:
+         logger.info(f"Initializing status for {len(DEEPSIDER_TOKEN)} token(s)...")
+         await initialize_token_status()
+         active_tokens = sum(1 for s in token_status.values() if s["active"])
+         logger.info(f"Initialization complete, active tokens: {active_tokens}/{len(DEEPSIDER_TOKEN)}")
+
+ # Main entry point
+ if __name__ == "__main__":
+     # Start the server
+     port = int(os.getenv("PORT", "3000"))
+     logger.info(f"Starting the OpenAI API proxy service on port {port}")
+     uvicorn.run(app, host="0.0.0.0", port=port)
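For quick verification after deploying this file, here is a minimal client-side sketch. Assumptions: the proxy runs locally with the default PORT=3000, and .env contains a comma-separated DEEPSIDER_TOKEN (for example DEEPSIDER_TOKEN=token1,token2); the "sk-placeholder" key below is an arbitrary stand-in, since verify_api_key only checks the "Bearer " prefix, not the key value.

import requests

BASE_URL = "http://localhost:3000"  # assumes the default PORT from app.py

# The proxy only validates the "Bearer " prefix, so any placeholder key passes;
# the real upstream credentials come from the DEEPSIDER_TOKEN environment variable.
headers = {"Authorization": "Bearer sk-placeholder", "Content-Type": "application/json"}

# List the models exposed via MODEL_MAPPING
print(requests.get(f"{BASE_URL}/v1/models", headers=headers).json())

# Non-streaming chat completion, proxied to the mapped DeepSider model
payload = {
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Hello!"}],
    "stream": False,
}
resp = requests.post(f"{BASE_URL}/v1/chat/completions", headers=headers, json=payload)
print(resp.json()["choices"][0]["message"]["content"])

Streaming works the same way with "stream": True, in which case the proxy returns text/event-stream chunks terminated by data: [DONE].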