dfa32412 committed
Commit 61c3332 · verified · 1 parent: 2ad48b6

Update app.py

Files changed (1): app.py (+72 −51)
app.py CHANGED
@@ -60,6 +60,7 @@ MODEL_MAPPING = {
# Token load-balancing state
token_status = {}

+
# Request headers
def get_headers():
    global TOKEN_INDEX
@@ -67,7 +68,7 @@ def get_headers():
    if len(DEEPSIDER_TOKEN) > 0:
        current_token = DEEPSIDER_TOKEN[TOKEN_INDEX % len(DEEPSIDER_TOKEN)]
        TOKEN_INDEX = (TOKEN_INDEX + 1) % len(DEEPSIDER_TOKEN)
-
+
        # Check the token's status
        if current_token in token_status and not token_status[current_token]["active"]:
            # If this token is unavailable, try the next one
@@ -79,7 +80,7 @@ def get_headers():
                    break
    else:
        current_token = ""
-
+
    return {
        "accept": "*/*",
        "accept-encoding": "gzip, deflate, br, zstd",
@@ -98,12 +99,14 @@ def get_headers():
        "authorization": f"Bearer {current_token}"
    }

+
# OpenAI API request models
class ChatMessage(BaseModel):
    role: str
    content: str
    name: Optional[str] = None

+
class ChatCompletionRequest(BaseModel):
    model: str
    messages: List[ChatMessage]
@@ -116,57 +119,58 @@ class ChatCompletionRequest(BaseModel):
    presence_penalty: Optional[float] = 0
    frequency_penalty: Optional[float] = 0
    user: Optional[str] = None
-
+
+
# Initialize token status
async def initialize_token_status():
    """Initial check of every token's status and quota"""
    global token_status
-
+
    for token in DEEPSIDER_TOKEN:
        headers = {
            "accept": "*/*",
            "content-type": "application/json",
            "authorization": f"Bearer {token}"
        }
-
+
        try:
            # Fetch account quota information
            response = requests.get(
                f"{DEEPSIDER_API_BASE.replace('/v2', '')}/quota/retrieve",
                headers=headers
            )
-
+
            active = False
            quota_info = {}
-
+
            if response.status_code == 200:
                data = response.json()
                if data.get('code') == 0:
                    quota_list = data.get('data', {}).get('list', [])
-
+
                    # Parse the quota information
                    for item in quota_list:
                        item_type = item.get('type', '')
                        available = item.get('available', 0)
-
+
                        if available > 0:
                            active = True
-
+
                        quota_info[item_type] = {
                            "total": item.get('total', 0),
                            "available": available,
                            "title": item.get('title', '')
                        }
-
+
            token_status[token] = {
                "active": active,
                "quota": quota_info,
                "last_checked": datetime.now(),
                "failed_count": 0
            }
-
+
            logger.info(f"Token {token[:8]}... status: {'active' if active else 'invalid'}")
-
+
        except Exception as e:
            logger.warning(f"Error while checking token {token[:8]}...: {str(e)}")
            token_status[token] = {
@@ -176,6 +180,7 @@ async def initialize_token_status():
                "failed_count": 0
            }

+
# Utility functions
def verify_api_key(api_key: str = Header(..., alias="Authorization")):
    """Validate the API key"""
@@ -183,10 +188,12 @@ def verify_api_key(api_key: str = Header(..., alias="Authorization")):
        raise HTTPException(status_code=401, detail="Invalid API key format")
    return api_key.replace("Bearer ", "")

+
def map_openai_to_deepsider_model(model: str) -> str:
    """Map an OpenAI model name to a DeepSider model name"""
    return MODEL_MAPPING.get(model, "anthropic/claude-3.7-sonnet")

+
def format_messages_for_deepsider(messages: List[ChatMessage]) -> str:
    """Format the message list into the prompt format required by the DeepSider API"""
    prompt = ""
@@ -203,17 +210,18 @@ def format_messages_for_deepsider(messages: List[ChatMessage]) -> str:
        else:
            # Treat any other role as a user message
            prompt += f"Human ({role}): {msg.content}\n\n"
-
+
    # If the last message is not from the user, add a "Human:" prefix to prompt the model to answer
    if messages and messages[-1].role != "user":
        prompt += "Human: "
-
+
    return prompt.strip()

+
def update_token_status(token: str, success: bool, error_message: str = None):
    """Update a token's status"""
    global token_status
-
+
    if token not in token_status:
        token_status[token] = {
            "active": True,
@@ -221,15 +229,15 @@ def update_token_status(token: str, success: bool, error_message: str = None):
            "last_checked": datetime.now(),
            "failed_count": 0
        }
-
+
    if not success:
        token_status[token]["failed_count"] += 1
-
+
        # If the error message indicates insufficient quota, mark the token inactive
        if error_message and ("配额不足" in error_message or "quota" in error_message.lower()):
            token_status[token]["active"] = False
            logger.warning(f"Token {token[:8]}... is out of quota and has been marked inactive")
-
+
        # Also mark the token inactive after 5 consecutive failures
        if token_status[token]["failed_count"] >= 5:
            token_status[token]["active"] = False
@@ -238,6 +246,7 @@ def update_token_status(token: str, success: bool, error_message: str = None):
        # On success, reset the failure counter
        token_status[token]["failed_count"] = 0

+
async def generate_openai_response(full_response: str, request_id: str, model: str) -> Dict:
    """Build a complete response in the OpenAI API response format"""
    timestamp = int(time.time())
@@ -263,27 +272,30 @@ async def generate_openai_response(full_response: str, request_id: str, model: str) -> Dict:
        }
    }

+
async def stream_openai_response(response, request_id: str, model: str, token: str):
    """Stream the response back in OpenAI API format"""
    timestamp = int(time.time())
    full_response = ""
-
+
    try:
        # Convert the DeepSider response stream into the OpenAI stream format
        for line in response.iter_lines():
            if not line:
                continue
-
+
            if line.startswith(b'data: '):
                try:
                    data = json.loads(line[6:].decode('utf-8'))
-
+                    if data.get('code') == 1005:
+                        raise Exception(data.get("message"))
+
                    if data.get('code') == 202 and data.get('data', {}).get('type') == "chat":
                        # Extract the body content
                        content = data.get('data', {}).get('content', '')
                        if content:
                            full_response += content
-
+
                        # Emit a streaming chunk in OpenAI format
                        chunk = {
                            "id": f"chatcmpl-{request_id}",
@@ -301,7 +313,7 @@ async def stream_openai_response(response, request_id: str, model: str, token: str):
                            ]
                        }
                        yield f"data: {json.dumps(chunk)}\n\n"
-
+
                    elif data.get('code') == 203:
                        # Emit the completion signal
                        chunk = {
@@ -319,18 +331,18 @@ async def stream_openai_response(response, request_id: str, model: str, token: str):
                        }
                        yield f"data: {json.dumps(chunk)}\n\n"
                        yield "data: [DONE]\n\n"
-
+
                except json.JSONDecodeError:
                    logger.warning(f"Could not parse response: {line}")
-
+
        # Update the token status (success)
        update_token_status(token, True)
-
+
    except Exception as e:
        logger.error(f"Error while processing the streaming response: {str(e)}")
        # Update the token status (failure)
        update_token_status(token, False, str(e))
-
+
        # Return the error information
        error_chunk = {
            "id": f"chatcmpl-{request_id}",
@@ -350,11 +362,13 @@ async def stream_openai_response(response, request_id: str, model: str, token: str):
        yield f"data: {json.dumps(error_chunk)}\n\n"
        yield "data: [DONE]\n\n"

+
# Route definitions
@app.get("/")
async def root():
    return {"message": "OpenAI API proxy is running and connected to the DeepSider API"}

+
@app.get("/v1/models")
async def list_models(api_key: str = Depends(verify_api_key)):
    """List the available models"""
@@ -366,31 +380,32 @@ async def list_models(api_key: str = Depends(verify_api_key)):
            "created": int(time.time()),
            "owned_by": "openai-proxy"
        })
-
+
    return {
        "object": "list",
        "data": models
    }

+
@app.post("/v1/chat/completions")
async def create_chat_completion(
-    request: Request,
-    api_key: str = Depends(verify_api_key)
+    request: Request,
+    api_key: str = Depends(verify_api_key)
):
    """Chat completion API - supports both regular and streaming requests"""
    # Parse the request body
    body = await request.json()
    chat_request = ChatCompletionRequest(**body)
-
+
    # Generate a unique request ID
    request_id = datetime.now().strftime("%Y%m%d%H%M%S") + str(time.time_ns())[-6:]
-
+
    # Map the model
    deepsider_model = map_openai_to_deepsider_model(chat_request.model)
-
+
    # Build the prompt required by the DeepSider API
    prompt = format_messages_for_deepsider(chat_request.messages)
-
+
    # Build the request payload
    payload = {
        "model": deepsider_model,
@@ -398,11 +413,11 @@ async def create_chat_completion(
        "webAccess": "close",  # Web access disabled by default
        "timezone": "Asia/Shanghai"
    }
-
+
    # Get the current token
    headers = get_headers()
    current_token = headers["authorization"].replace("Bearer ", "")
-
+
    try:
        # Send the request to the DeepSider API
        response = requests.post(
@@ -411,7 +426,7 @@ async def create_chat_completion(
            json=payload,
            stream=True
        )
-
+
        # Check the response status
        if response.status_code != 200:
            error_msg = f"DeepSider API request failed: {response.status_code}"
@@ -420,14 +435,14 @@ async def create_chat_completion(
                error_msg += f" - {error_data.get('message', '')}"
            except:
                error_msg += f" - {response.text}"
-
+
            logger.error(error_msg)
-
+
            # Update the token status
            update_token_status(current_token, False, error_msg)
-
+
            raise HTTPException(status_code=response.status_code, detail="API request failed")
-
+
        # Handle streaming vs. non-streaming responses
        if chat_request.stream:
            # Return a streaming response
@@ -441,25 +456,25 @@ async def create_chat_completion(
            for line in response.iter_lines():
                if not line:
                    continue
-
+
                if line.startswith(b'data: '):
                    try:
                        data = json.loads(line[6:].decode('utf-8'))
-
+
                        if data.get('code') == 202 and data.get('data', {}).get('type') == "chat":
                            content = data.get('data', {}).get('content', '')
                            if content:
                                full_response += content
-
+
                    except json.JSONDecodeError:
                        pass
-
+
            # Update the token status (success)
            update_token_status(current_token, True)
-
+
            # Return the complete response in OpenAI format
            return await generate_openai_response(full_response, request_id, chat_request.model)
-
+
    except HTTPException:
        raise
    except Exception as e:
@@ -468,6 +483,7 @@ async def create_chat_completion(
        update_token_status(current_token, False, str(e))
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")

+
# Endpoint for inspecting token status
@app.get("/admin/tokens")
async def get_token_status(admin_key: str = Header(None, alias="X-Admin-Key")):
@@ -476,15 +492,16 @@ async def get_token_status(admin_key: str = Header(None, alias="X-Admin-Key")):
    expected_admin_key = os.getenv("ADMIN_KEY", "admin")
    if not admin_key or admin_key != expected_admin_key:
        raise HTTPException(status_code=403, detail="Unauthorized")
-
+
    # Mask tokens, showing only the first 8 characters
    safe_status = {}
    for token, status in token_status.items():
        token_display = token[:8] + "..." if len(token) > 8 else token
        safe_status[token_display] = status
-
+
    return {"tokens": safe_status, "active_tokens": sum(1 for s in token_status.values() if s["active"])}

+
# Manually refresh token status
@app.post("/admin/refresh-tokens")
async def refresh_token_status(admin_key: str = Header(None, alias="X-Admin-Key")):
@@ -493,10 +510,11 @@ async def refresh_token_status(admin_key: str = Header(None, alias="X-Admin-Key")):
    expected_admin_key = os.getenv("ADMIN_KEY", "admin")
    if not admin_key or admin_key != expected_admin_key:
        raise HTTPException(status_code=403, detail="Unauthorized")
-
+
    await initialize_token_status()
    return {"message": "All token statuses have been refreshed", "active_tokens": sum(1 for s in token_status.values() if s["active"])}

+
# Routes that emulate legacy model APIs
@app.get("/v1/engines")
@app.get("/v1/engines/{engine_id}")
@@ -504,6 +522,7 @@ async def engines_handler():
    """Compatibility with the legacy engines API"""
    raise HTTPException(status_code=404, detail="The engines API is deprecated; please use the models API")

+
# Error handlers
@app.exception_handler(404)
async def not_found_handler(request, exc):
@@ -515,6 +534,7 @@ async def not_found_handler(request, exc):
        }
    }, 404

+
# Startup event
@app.on_event("startup")
async def startup_event():
@@ -527,6 +547,7 @@ async def startup_event():
    active_tokens = sum(1 for s in token_status.values() if s["active"])
    logger.info(f"Initialization complete. Active tokens: {active_tokens}/{len(DEEPSIDER_TOKEN)}")

+
# Main program
if __name__ == "__main__":
    # Start the server
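
For reference, below is a minimal client-side sketch of exercising the /v1/chat/completions route touched by this commit. It is illustrative only: the base URL, port, proxy API key, and OpenAI-side model name are assumptions rather than values taken from this repository, and the chunk parsing assumes the standard OpenAI delta layout that the streaming code above emits.

# Illustrative client for the proxy's /v1/chat/completions endpoint.
# Assumptions (not from this commit): the proxy listens on http://localhost:8000,
# PROXY_API_KEY is a key accepted by verify_api_key(), and "claude-3.7-sonnet"
# is an OpenAI-side name present in MODEL_MAPPING.
import json
import requests

PROXY_BASE = "http://localhost:8000"
PROXY_API_KEY = "sk-your-proxy-key"

payload = {
    "model": "claude-3.7-sonnet",
    "messages": [{"role": "user", "content": "Hello"}],
    "stream": True,
}

resp = requests.post(
    f"{PROXY_BASE}/v1/chat/completions",
    headers={"Authorization": f"Bearer {PROXY_API_KEY}"},
    json=payload,
    stream=True,
)

# The proxy streams SSE lines of the form "data: {...}" and finishes with "data: [DONE]".
for line in resp.iter_lines():
    if not line or not line.startswith(b"data: "):
        continue
    body = line[6:].decode("utf-8")
    if body.strip() == "[DONE]":
        break
    chunk = json.loads(body)
    delta = chunk["choices"][0].get("delta", {}).get("content", "")
    print(delta, end="", flush=True)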
 
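Likewise, a small sketch of the token-administration endpoints defined in app.py; the base URL is an assumption, and the X-Admin-Key value must match the ADMIN_KEY environment variable, which defaults to "admin" in the code above.

# Illustrative calls to the admin endpoints.
# Assumption: the proxy runs at http://localhost:8000; ADMIN_KEY defaults to "admin"
# per os.getenv("ADMIN_KEY", "admin") in app.py.
import requests

PROXY_BASE = "http://localhost:8000"
ADMIN_KEY = "admin"

# Inspect masked per-token status and quota bookkeeping.
status = requests.get(
    f"{PROXY_BASE}/admin/tokens",
    headers={"X-Admin-Key": ADMIN_KEY},
).json()
print(status["active_tokens"], "active token(s)")

# Force a fresh quota check for every configured DeepSider token.
refreshed = requests.post(
    f"{PROXY_BASE}/admin/refresh-tokens",
    headers={"X-Admin-Key": ADMIN_KEY},
).json()
print(refreshed["message"])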