Calmlo committed on
Commit 46da92e · verified · 1 Parent(s): 5838dda

Update server.js

Files changed (1)
  1. server.js +84 -125
server.js CHANGED
@@ -20,9 +20,10 @@ if (falKeys.length === 0) {
  process.exit(1);
  }

- console.log(`Loaded ${falKeys.length} Fal AI Keys.`);

- let currentFalKeyIndex = 0; // Index of the *next* key to try for a *new* request

  // --- End Key Management ---

@@ -232,12 +233,12 @@ function convertMessagesToFalPrompt(messages) {
  // === End of convertMessagesToFalPrompt ===


- // POST /v1/chat/completions endpoint (key retry logic - stream fix)
  app.post('/v1/chat/completions', async (req, res) => {
  const { model, messages, stream = false, reasoning = false, ...restOpenAIParams } = req.body;
- const requestId = `req-${Date.now()}`; // Unique ID for this incoming request

- console.log(`[${requestId}] Received chat completion request for model: ${model}, stream: ${stream}`);

  if (!FAL_SUPPORTED_MODELS.includes(model)) {
  console.warn(`[${requestId}] Warning: Requested model '${model}' is not in the explicitly supported list.`);
@@ -247,8 +248,13 @@ app.post('/v1/chat/completions', async (req, res) => {
  return res.status(400).json({ error: 'Missing or invalid parameters: model and messages array are required.' });
  }

- let lastError = null; // Store the last error encountered during key rotation
- let success = false; // Flag to indicate if any key succeeded

  // Prepare the Fal input (only needs to be built once)
  const { prompt, system_prompt } = convertMessagesToFalPrompt(messages);
@@ -258,82 +264,69 @@ app.post('/v1/chat/completions', async (req, res) => {
  ...(system_prompt && { system_prompt: system_prompt }),
  reasoning: !!reasoning,
  };

- // Log the Fal input and prompt info once
- console.log(`[${requestId}] Fal Input (prepared once):`, JSON.stringify(falInput, null, 2));
- console.log(`[${requestId}] System Prompt Length:`, system_prompt?.length || 0);
- console.log(`[${requestId}] Prompt Length:`, prompt?.length || 0);

- // *** Retry loop: try at most falKeys.length keys ***
- for (let attempt = 0; attempt < falKeys.length; attempt++) {
- const keyIndexToTry = (currentFalKeyIndex + attempt) % falKeys.length;
- const selectedFalKey = falKeys[keyIndexToTry];
- console.log(`[${requestId}] Attempt ${attempt + 1}/${falKeys.length}: Trying Fal Key at index ${keyIndexToTry}`);

  try {
- // Configure the fal client (reconfigured on every attempt)
- fal.config({
- credentials: selectedFalKey,
- });

- // --- Execute the Fal AI call ---
  if (stream) {
  // --- Streaming handling ---
  res.setHeader('Content-Type', 'text/event-stream; charset=utf-8');
  res.setHeader('Cache-Control', 'no-cache');
  res.setHeader('Connection', 'keep-alive');
  res.setHeader('Access-Control-Allow-Origin', '*');
- // !! Do not flushHeaders here !!

  let previousOutput = '';
  let firstEventProcessed = false;
- let streamFailedMidway = false; // Flag for errors after successful start
- let keyConfirmedWorking = false; // Flag if key actually produced data

  const falStream = await fal.stream("fal-ai/any-llm", { input: falInput });

- // Process the stream
  try {
  for await (const event of falStream) {
  const currentOutput = (event && typeof event.output === 'string') ? event.output : '';
  const isPartial = (event && typeof event.partial === 'boolean') ? event.partial : true;
  const errorInfo = (event && event.error) ? event.error : null;
- const eventStatus = errorInfo?.status; // Check status within error object if present

- // --- Check for event errors ---
  if (errorInfo) {
- console.warn(`[${requestId}] Error received in fal stream event (Key Index ${keyIndexToTry}):`, errorInfo);
- lastError = errorInfo; // Store the error

- // If this is the first event and the error is key-related (401/403/429), abort this key's attempt
  if (!firstEventProcessed && (eventStatus === 401 || eventStatus === 403 || eventStatus === 429)) {
- console.warn(`[${requestId}] Key-related error (${eventStatus}) on first stream event for key index ${keyIndexToTry}. Aborting this attempt.`);
- // No response needed; just break out of the inner loop so the outer loop tries the next key
- break; // Exit the inner `for await...of` loop
  } else {
- // Any other error, or an error after the first event, is treated as a stream failure
- console.error(`[${requestId}] Unrecoverable stream error or error after stream start.`);
- streamFailedMidway = true; // Mark stream as failed after start
  if (!res.headersSent) {
- // Headers not sent yet, so the key likely failed right away; respond with a 500
  res.status(500).json({ object: "error", message: `Fal Stream Error: ${JSON.stringify(errorInfo)}`, type:"fal_stream_error"});
- console.error(`[${requestId}] Headers not sent, responding with 500 JSON error.`);
  } else if (!res.writableEnded) {
- // Headers already sent, so emit an error chunk
  const errorChunk = { id: `chatcmpl-${Date.now()}-error`, object: "chat.completion.chunk", created: Math.floor(Date.now() / 1000), model: model, choices: [{ index: 0, delta: {}, finish_reason: "error", message: { role: 'assistant', content: `Fal Stream Error: ${JSON.stringify(errorInfo)}` } }] };
  res.write(`data: ${JSON.stringify(errorChunk)}\n\n`);
- console.error(`[${requestId}] Headers sent, sending error chunk.`);
  }
- break; // Exit the inner `for await...of` loop
  }
  }

- // --- First non-error event received successfully ---
  if (!keyConfirmedWorking && !errorInfo) {
- success = true; // Mark overall success *for this request*
- keyConfirmedWorking = true; // Mark this specific key as working
- currentFalKeyIndex = (keyIndexToTry + 1) % falKeys.length; // Update global index for next request
- console.log(`[${requestId}] Key at index ${keyIndexToTry} confirmed working. Next request starts at index ${currentFalKeyIndex}.`);
  if (!res.headersSent) {
  res.flushHeaders();
  console.log(`[${requestId}] Stream headers flushed.`);
@@ -341,15 +334,12 @@ app.post('/v1/chat/completions', async (req, res) => {
  firstEventProcessed = true;
  }

- // --- Process valid data ---
  if (!errorInfo) {
  let deltaContent = '';
  if (currentOutput.startsWith(previousOutput)) {
  deltaContent = currentOutput.substring(previousOutput.length);
  } else if (currentOutput.length > 0) {
- console.warn(`[${requestId}] Fal stream output mismatch detected. Sending full current output as delta.`, { previousLength: previousOutput.length, currentLength: currentOutput.length });
- deltaContent = currentOutput;
- previousOutput = ''; // Reset previous if mismatch
  }
  previousOutput = currentOutput;

@@ -360,78 +350,56 @@ app.post('/v1/chat/completions', async (req, res) => {
  }
  }
  }
-
  } // End `for await...of` loop

- // --- Post-loop handling ---
  if (streamFailedMidway) {
- // If we broke out because of a mid-stream error, make sure the response is ended
  if (!res.writableEnded) {
- res.write(`data: [DONE]\n\n`); // Send DONE even after error as per OpenAI spec
- res.end();
- console.log(`[${requestId}] Stream ended with [DONE] after mid-stream error.`);
  }
- break; // Exit the outer key retry loop because the stream failed *after* starting
  } else if (keyConfirmedWorking) {
- // If the key worked and the loop ended normally (no break)
  if (!res.writableEnded) {
- res.write(`data: [DONE]\n\n`);
- res.end();
- console.log(`[${requestId}] Stream finished normally and [DONE] sent.`);
  }
- break; // Exit the outer key retry loop because we succeeded
  }
- // If loop finished without confirming the key worked and without mid-stream error (e.g., first event was key error)
- // let the outer loop continue to the next key.

  } catch (streamProcessingError) {
- // This catches errors in the stream processing *logic* itself, less likely
- console.error(`[${requestId}] Error during fal stream processing loop logic:`, streamProcessingError);
  lastError = streamProcessingError;
  if (!res.headersSent) {
  res.status(500).json({ object: "error", message: `Proxy Stream Processing Error: ${streamProcessingError.message}`, type:"proxy_internal_error"});
- console.error(`[${requestId}] Headers not sent, responding with 500 JSON error for stream logic failure.`);
  } else if (!res.writableEnded) {
  try {
  res.write(`data: ${JSON.stringify({ error: { message: "Proxy Stream processing error", type: "proxy_internal_error", details: streamProcessingError.message } })}\n\n`);
- res.write(`data: [DONE]\n\n`);
- res.end();
- console.error(`[${requestId}] Headers sent, sending error chunk for stream logic failure.`);
- } catch (finalError) {
- console.error(`[${requestId}] Error sending final error message to client:`, finalError);
- if (!res.writableEnded) { res.end(); }
- }
  }
- break; // Exit the outer key retry loop
  }

- // If we reached here and `success` is true, it means the stream finished successfully.
- if (success) {
- break; // Exit the outer key retry loop
- }
- // Otherwise, the stream ended because the first event was a key error, continue the outer loop.


  } else {
- // --- Non-streaming handling (mostly unchanged) ---
- console.log(`[${requestId}] Executing non-stream request with key index ${keyIndexToTry}...`);
  const result = await fal.subscribe("fal-ai/any-llm", { input: falInput, logs: true });

  if (result && result.error) {
- console.error(`[${requestId}] Fal-ai returned a business error with key index ${keyIndexToTry}:`, result.error);
  lastError = new Error(`Fal-ai error: ${JSON.stringify(result.error)}`);
- lastError.status = result.status || 500; // Use status from error if available
  lastError.type = "fal_ai_error";
- // Business errors (e.g., bad input) usually shouldn't be retried with other keys
- break; // Exit retry loop
  }

- console.log(`[${requestId}] Received non-stream result from fal-ai with key index ${keyIndexToTry}`);
-
- success = true; // Mark overall success
- currentFalKeyIndex = (keyIndexToTry + 1) % falKeys.length; // Update global index
- console.log(`[${requestId}] Key at index ${keyIndexToTry} successful (non-stream). Next request starts at index ${currentFalKeyIndex}.`);
-
  const openAIResponse = {
  id: `chatcmpl-${result.requestId || Date.now()}`, object: "chat.completion", created: Math.floor(Date.now() / 1000), model: model,
  choices: [{ index: 0, message: { role: "assistant", content: result.output || "" }, finish_reason: "stop" }],
@@ -440,72 +408,63 @@ app.post('/v1/chat/completions', async (req, res) => {
  ...(result.reasoning && { fal_reasoning: result.reasoning }),
  };
  res.json(openAIResponse);
- break; // Success; exit the retry loop
  }

  } catch (error) {
- // This outer catch handles errors from fal.config, fal.stream setup (before first event), fal.subscribe setup
  lastError = error;
  const status = error?.status;
  const errorMessage = error?.body?.detail || error?.message || 'Unknown setup error';

- console.warn(`[${requestId}] Attempt ${attempt + 1} with key index ${keyIndexToTry} failed during setup. Status: ${status || 'N/A'}, Message: ${errorMessage}`);
- console.error("Setup Error details:", error); // Log full error
-
- // Check for key-related errors during setup
  if (status === 401 || status === 403 || status === 429) {
- console.log(`[${requestId}] Key-related setup error (${status}). Trying next key...`);
- // Continue the outer loop
  } else {
- // Unrecoverable setup error (e.g., network, internal fal error)
- console.error(`[${requestId}] Unrecoverable setup error encountered. Status: ${status || 'N/A'}. Stopping key rotation.`);
- break; // Exit the outer key retry loop
  }
  }
  } // --- End retry loop ---

- // If the loop finished without success (all keys failed or an unrecoverable error occurred)
  if (!success) {
- console.error(`[${requestId}] All Fal Key attempts failed or an unrecoverable error occurred.`);
  if (!res.headersSent) {
- const statusCode = lastError?.status || 503; // Use status from last error (could be from setup or first stream event), default 503
  const errorMessage = (lastError instanceof Error) ? lastError.message : JSON.stringify(lastError);
- const detailMessage = lastError?.body?.detail || errorMessage; // Prefer detailed message
  const errorType = lastError?.type || (statusCode === 401 || statusCode === 403 || statusCode === 429 ? "key_error" : "proxy_error");
-
  console.error(`[${requestId}] Sending final error response. Status: ${statusCode}, Type: ${errorType}, Message: ${detailMessage}`);
-
  res.status(statusCode).json({
  object: "error",
- message: `All Fal Key attempts failed or an unrecoverable error occurred. Last error: ${detailMessage}`,
  type: errorType,
  param: null,
  code: statusCode === 429 ? "rate_limit_exceeded" : (statusCode === 401 || statusCode === 403 ? "invalid_api_key" : "service_unavailable")
  });
  } else if (!res.writableEnded) {
- // This case should be less likely now as stream errors are handled inside the loop
- console.error(`[${requestId}] Headers potentially sent, but request failed. Attempting to end stream.`);
  try {
- // Don't send another error chunk if one might have been sent already
- res.write(`data: [DONE]\n\n`);
- res.end();
- } catch (e) {
- console.error(`[${requestId}] Failed to write final [DONE] to stream:`, e);
- if (!res.writableEnded) res.end();
- }
  } else {
- console.error(`[${requestId}] Request failed, but response stream was already fully ended. Cannot send error.`);
  }
  }

  });

- // Start the server (updated startup info)
  app.listen(PORT, () => {
  console.log(`===================================================`);
- console.log(` Fal OpenAI Proxy Server (Key Rotation with Retry v2 + System Top + Separator + Recency)`); // Updated description
  console.log(` Listening on port: ${PORT}`);
- console.log(` Loaded ${falKeys.length} Fal AI Keys for rotation.`);
  console.log(` Using Limits: System Prompt=${SYSTEM_PROMPT_LIMIT}, Prompt=${PROMPT_LIMIT}`);
  console.log(` API Key Auth Enabled: ${API_KEY ? 'Yes' : 'No'}`);
  console.log(` Chat Completions Endpoint: POST http://localhost:${PORT}/v1/chat/completions`);
@@ -513,7 +472,7 @@ app.listen(PORT, () => {
  console.log(`===================================================`);
  });

- // Root path response (updated info)
  app.get('/', (req, res) => {
- res.send('Fal OpenAI Proxy (Key Rotation with Retry v2 + System Top + Separator + Recency Strategy) is running.'); // Updated description
  });
 
  process.exit(1);
  }

+ console.log(`Loaded ${falKeys.length} Fal AI Keys initially.`);

+ // currentFalKeyIndex is no longer needed
+ // let currentFalKeyIndex = 0;

  // --- End Key Management ---

  // === End of convertMessagesToFalPrompt ===


+ // POST /v1/chat/completions endpoint (random key + failure exclusion)
  app.post('/v1/chat/completions', async (req, res) => {
  const { model, messages, stream = false, reasoning = false, ...restOpenAIParams } = req.body;
+ const requestId = `req-${Date.now()}`;

+ console.log(`[${requestId}] Received chat completion request for model: ${model}, stream: ${stream}. Strategy: Random key with exclusion.`);

  if (!FAL_SUPPORTED_MODELS.includes(model)) {
  console.warn(`[${requestId}] Warning: Requested model '${model}' is not in the explicitly supported list.`);
 
  return res.status(400).json({ error: 'Missing or invalid parameters: model and messages array are required.' });
  }

+ let lastError = null;
+ let success = false;
+ let attempt = 0;
+ const maxAttempts = falKeys.length; // Safety limit
+
+ // *** Create a per-request copy of the available key list ***
+ let availableKeysForRequest = [...falKeys];

  // Prepare the Fal input (only needs to be built once)
  const { prompt, system_prompt } = convertMessagesToFalPrompt(messages);
 
  ...(system_prompt && { system_prompt: system_prompt }),
  reasoning: !!reasoning,
  };
+ console.log(`[${requestId}] Fal Input prepared. System Prompt Length: ${system_prompt?.length || 0}, Prompt Length: ${prompt?.length || 0}`);


+ // *** Retry loop: keep going while keys remain available and the attempt limit has not been reached ***
+ while (availableKeysForRequest.length > 0 && attempt < maxAttempts) {
+ attempt++;
+ // *** Pick a key at random ***
+ const randomIndex = Math.floor(Math.random() * availableKeysForRequest.length);
+ const selectedFalKey = availableKeysForRequest[randomIndex];
+ // Mask key in logs for security
+ const maskedKey = selectedFalKey.length > 8 ? `${selectedFalKey.substring(0, 4)}...${selectedFalKey.substring(selectedFalKey.length - 4)}` : selectedFalKey;
+ console.log(`[${requestId}] Attempt ${attempt}/${maxAttempts}: Trying random key (masked: ${maskedKey}). ${availableKeysForRequest.length} keys available.`);

  try {
+ fal.config({ credentials: selectedFalKey });

  if (stream) {
  // --- Streaming handling ---
  res.setHeader('Content-Type', 'text/event-stream; charset=utf-8');
  res.setHeader('Cache-Control', 'no-cache');
  res.setHeader('Connection', 'keep-alive');
  res.setHeader('Access-Control-Allow-Origin', '*');

  let previousOutput = '';
  let firstEventProcessed = false;
+ let streamFailedMidway = false;
+ let keyConfirmedWorking = false;

  const falStream = await fal.stream("fal-ai/any-llm", { input: falInput });

  try {
  for await (const event of falStream) {
  const currentOutput = (event && typeof event.output === 'string') ? event.output : '';
  const isPartial = (event && typeof event.partial === 'boolean') ? event.partial : true;
  const errorInfo = (event && event.error) ? event.error : null;
+ const eventStatus = errorInfo?.status;

  if (errorInfo) {
+ console.warn(`[${requestId}] Error in stream event (Key: ${maskedKey}):`, errorInfo);
+ lastError = errorInfo;

  if (!firstEventProcessed && (eventStatus === 401 || eventStatus === 403 || eventStatus === 429)) {
+ console.warn(`[${requestId}] Key ${maskedKey} failed (${eventStatus}) on first event. Excluding it for this request.`);
+ availableKeysForRequest.splice(randomIndex, 1); // Remove from the available list
+ console.log(`[${requestId}] ${availableKeysForRequest.length} keys remaining for this request.`);
+ break; // Break the inner loop; the outer loop will try another random key
  } else {
+ console.error(`[${requestId}] Unrecoverable stream error or error after stream start (Key: ${maskedKey}).`);
+ streamFailedMidway = true;
  if (!res.headersSent) {
  res.status(500).json({ object: "error", message: `Fal Stream Error: ${JSON.stringify(errorInfo)}`, type:"fal_stream_error"});
  } else if (!res.writableEnded) {
  const errorChunk = { id: `chatcmpl-${Date.now()}-error`, object: "chat.completion.chunk", created: Math.floor(Date.now() / 1000), model: model, choices: [{ index: 0, delta: {}, finish_reason: "error", message: { role: 'assistant', content: `Fal Stream Error: ${JSON.stringify(errorInfo)}` } }] };
  res.write(`data: ${JSON.stringify(errorChunk)}\n\n`);
  }
+ break; // Break the inner loop
  }
  }

  if (!keyConfirmedWorking && !errorInfo) {
+ success = true;
+ keyConfirmedWorking = true;
+ console.log(`[${requestId}] Key ${maskedKey} confirmed working (stream).`);
  if (!res.headersSent) {
  res.flushHeaders();
  console.log(`[${requestId}] Stream headers flushed.`);
 
  firstEventProcessed = true;
  }

  if (!errorInfo) {
  let deltaContent = '';
  if (currentOutput.startsWith(previousOutput)) {
  deltaContent = currentOutput.substring(previousOutput.length);
  } else if (currentOutput.length > 0) {
+ deltaContent = currentOutput; previousOutput = '';
  }
  previousOutput = currentOutput;

 
  }
  }
  }
  } // End `for await...of` loop

  if (streamFailedMidway) {
  if (!res.writableEnded) {
+ res.write(`data: [DONE]\n\n`); res.end();
  }
+ break; // Break the outer loop: the stream failed mid-way
  } else if (keyConfirmedWorking) {
  if (!res.writableEnded) {
+ res.write(`data: [DONE]\n\n`); res.end();
  }
+ break; // Break the outer loop: success
  }
+ // If the inner loop broke because the first event was a key error, the outer loop continues

  } catch (streamProcessingError) {
+ console.error(`[${requestId}] Error during fal stream processing loop logic (Key: ${maskedKey}):`, streamProcessingError);
  lastError = streamProcessingError;
  if (!res.headersSent) {
  res.status(500).json({ object: "error", message: `Proxy Stream Processing Error: ${streamProcessingError.message}`, type:"proxy_internal_error"});
  } else if (!res.writableEnded) {
  try {
  res.write(`data: ${JSON.stringify({ error: { message: "Proxy Stream processing error", type: "proxy_internal_error", details: streamProcessingError.message } })}\n\n`);
+ res.write(`data: [DONE]\n\n`); res.end();
+ } catch (finalError) { if (!res.writableEnded) { res.end(); } }
  }
+ break; // Break the outer loop
  }

+ // If the stream succeeded or failed mid-way, the outer loop breaks; if the inner loop broke on a first-event key error, it continues
+ if (success || streamFailedMidway) {
+ break;
+ }


  } else {
+ // --- Non-streaming handling ---
+ console.log(`[${requestId}] Executing non-stream request (Key: ${maskedKey})...`);
  const result = await fal.subscribe("fal-ai/any-llm", { input: falInput, logs: true });

  if (result && result.error) {
+ console.error(`[${requestId}] Fal-ai returned a business error (Key: ${maskedKey}):`, result.error);
  lastError = new Error(`Fal-ai error: ${JSON.stringify(result.error)}`);
+ lastError.status = result.status || 500;
  lastError.type = "fal_ai_error";
+ break; // Business error; stop retrying
  }

+ console.log(`[${requestId}] Received non-stream result (Key: ${maskedKey}).`);
+ success = true; // Mark success
  const openAIResponse = {
  id: `chatcmpl-${result.requestId || Date.now()}`, object: "chat.completion", created: Math.floor(Date.now() / 1000), model: model,
  choices: [{ index: 0, message: { role: "assistant", content: result.output || "" }, finish_reason: "stop" }],
 
  ...(result.reasoning && { fal_reasoning: result.reasoning }),
  };
  res.json(openAIResponse);
+ break; // Success; break the outer loop
  }

  } catch (error) {
+ // Catch errors from fal.config, fal.stream/subscribe setup
  lastError = error;
  const status = error?.status;
  const errorMessage = error?.body?.detail || error?.message || 'Unknown setup error';
+ console.warn(`[${requestId}] Attempt ${attempt} with key ${maskedKey} failed during setup. Status: ${status || 'N/A'}, Message: ${errorMessage}`);
+ // console.error("Setup Error details:", error); // Log full error for debug if needed

  if (status === 401 || status === 403 || status === 429) {
+ console.warn(`[${requestId}] Key ${maskedKey} failed (${status}) during setup. Excluding it for this request.`);
+ availableKeysForRequest.splice(randomIndex, 1); // Remove from the available list
+ console.log(`[${requestId}] ${availableKeysForRequest.length} keys remaining for this request.`);
+ // Continue the outer loop
  } else {
+ console.error(`[${requestId}] Unrecoverable setup error encountered (Key: ${maskedKey}). Status: ${status || 'N/A'}. Stopping retries.`);
+ break; // Break the outer loop
  }
  }
  } // --- End retry loop ---

+ // If the loop ended without success
  if (!success) {
+ console.error(`[${requestId}] All attempts failed or an unrecoverable error occurred. No available keys left or max attempts reached.`);
  if (!res.headersSent) {
+ const statusCode = lastError?.status || 503;
  const errorMessage = (lastError instanceof Error) ? lastError.message : JSON.stringify(lastError);
+ const detailMessage = lastError?.body?.detail || errorMessage;
  const errorType = lastError?.type || (statusCode === 401 || statusCode === 403 || statusCode === 429 ? "key_error" : "proxy_error");
  console.error(`[${requestId}] Sending final error response. Status: ${statusCode}, Type: ${errorType}, Message: ${detailMessage}`);
  res.status(statusCode).json({
  object: "error",
+ message: `All attempts failed. Last error: ${detailMessage}`,
  type: errorType,
  param: null,
  code: statusCode === 429 ? "rate_limit_exceeded" : (statusCode === 401 || statusCode === 403 ? "invalid_api_key" : "service_unavailable")
  });
  } else if (!res.writableEnded) {
+ console.error(`[${requestId}] Headers potentially sent, but request failed. Ending stream.`);
  try {
+ res.write(`data: [DONE]\n\n`); res.end();
+ } catch (e) { if (!res.writableEnded) res.end(); }
  } else {
+ console.error(`[${requestId}] Request failed, but response stream was already fully ended.`);
  }
  }

  });

+ // Start the server
  app.listen(PORT, () => {
  console.log(`===================================================`);
+ console.log(` Fal OpenAI Proxy Server (Random Key + Exclusion Strategy)`); // Updated description
  console.log(` Listening on port: ${PORT}`);
+ console.log(` Loaded ${falKeys.length} Fal AI Keys initially.`);
  console.log(` Using Limits: System Prompt=${SYSTEM_PROMPT_LIMIT}, Prompt=${PROMPT_LIMIT}`);
  console.log(` API Key Auth Enabled: ${API_KEY ? 'Yes' : 'No'}`);
  console.log(` Chat Completions Endpoint: POST http://localhost:${PORT}/v1/chat/completions`);
 
  console.log(`===================================================`);
  });

+ // Root path response
  app.get('/', (req, res) => {
+ res.send('Fal OpenAI Proxy (Random Key + Exclusion Strategy) is running.'); // Updated description
  });
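
For readers skimming the diff, the retry behavior this commit introduces boils down to: pick a key at random, exclude it for the rest of the request if it fails with a key-related status (401/403/429), and stop immediately on any other error. Below is a minimal standalone sketch of that selection loop; `callWithKey` is a hypothetical stand-in for the fal.config plus fal.stream/fal.subscribe call, not code taken from server.js.

// Minimal sketch (not the committed code): random key selection with
// per-request exclusion of keys that fail with 401/403/429.
async function callWithRandomKey(falKeys, callWithKey) {
  const availableKeys = [...falKeys]; // per-request copy; the global key list stays intact
  let lastError = null;

  while (availableKeys.length > 0) {
    const index = Math.floor(Math.random() * availableKeys.length);
    const key = availableKeys[index];
    try {
      return await callWithKey(key); // success: stop retrying
    } catch (err) {
      lastError = err;
      if (err?.status === 401 || err?.status === 403 || err?.status === 429) {
        availableKeys.splice(index, 1); // key-related failure: exclude this key and try another
      } else {
        throw err; // unrecoverable error: stop immediately
      }
    }
  }
  throw lastError ?? new Error('All Fal keys failed for this request');
}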