Calmlo committed on
Commit 6a44de0 · verified · Parent: 8f3811c

Update server.js: read comma-separated Fal API keys from the FAL_KEY environment variable (previously FAL_KEYS) and update the related error and log messages.

Files changed (1)
  1. server.js +33 -64
server.js CHANGED
@@ -2,11 +2,13 @@ import express from 'express';
 import { fal } from '@fal-ai/client';
 
 // --- Multi-Key Configuration ---
-const rawFalKeys = process.env.FAL_KEYS; // Expect comma-separated keys: key1,key2,key3
+// *** Read comma-separated keys from the FAL_KEY environment variable ***
+const rawFalKeys = process.env.FAL_KEY; // Expect comma-separated keys: key1,key2,key3 in FAL_KEY
 const API_KEY = process.env.API_KEY; // Custom API Key for proxy auth remains the same
 
 if (!rawFalKeys) {
-    console.error("Error: FAL_KEYS environment variable is not set (should be comma-separated).");
+    // *** Updated error message to reference FAL_KEY ***
+    console.error("Error: FAL_KEY environment variable is not set (should be comma-separated).");
     process.exit(1);
 }
 
@@ -26,18 +28,19 @@ let falKeys = rawFalKeys.split(',')
     }));
 
 if (falKeys.length === 0) {
-    console.error("Error: No valid FAL_KEYS found after processing the environment variable.");
+    // *** Updated error message to reference FAL_KEY ***
+    console.error("Error: No valid keys found in FAL_KEY after processing the environment variable.");
     process.exit(1);
 }
 
 let currentKeyIndex = 0;
 const failedKeyCooldown = 60 * 1000; // Cooldown period in milliseconds (e.g., 60 seconds) before retrying a failed key
 
-console.log(`Loaded ${falKeys.length} FAL API Key(s).`);
+// *** Updated log message to reference FAL_KEY ***
+console.log(`Loaded ${falKeys.length} FAL API Key(s) from FAL_KEY environment variable.`);
 console.log(`Failed key cooldown period: ${failedKeyCooldown / 1000} seconds.`);
 
 // NOTE: We will configure fal client per request now, so initial global config is removed.
-// fal.config({ ... }); // Removed
 
 // --- Key Management Functions ---
 
@@ -126,7 +129,6 @@ const PORT = process.env.PORT || 3000;
 
 // API key auth middleware (Remains the same, checks custom API_KEY)
 const apiKeyAuth = (req, res, next) => {
-    // ... (Keep existing apiKeyAuth middleware code) ...
     const authHeader = req.headers['authorization'];
 
     if (!authHeader) {
@@ -157,7 +159,7 @@ const SYSTEM_PROMPT_LIMIT = 4800;
 // === End of limit definitions ===
 
 // Define the list of models supported by fal-ai/any-llm (Remains the same)
-const FAL_SUPPORTED_MODELS = [ /* ... model list ... */
+const FAL_SUPPORTED_MODELS = [
     "anthropic/claude-3.7-sonnet",
     "anthropic/claude-3.5-sonnet",
     "anthropic/claude-3-5-haiku",
@@ -178,7 +180,7 @@ const FAL_SUPPORTED_MODELS = [ /* ... model list ... */
 ];
 
 // Helper function to get owner from model ID (Remains the same)
-const getOwner = (modelId) => { /* ... */
+const getOwner = (modelId) => {
     if (modelId && modelId.includes('/')) {
         return modelId.split('/')[0];
     }
@@ -186,7 +188,7 @@ const getOwner = (modelId) => { /* ... */
 };
 
 // GET /v1/models endpoint (Remains the same)
-app.get('/v1/models', (req, res) => { /* ... */
+app.get('/v1/models', (req, res) => {
     console.log("Received request for GET /v1/models");
     try {
         const modelsData = FAL_SUPPORTED_MODELS.map(modelId => ({
@@ -201,7 +203,7 @@ app.get('/v1/models', (req, res) => { /* ... */
 });
 
 // === convertMessagesToFalPrompt function (Remains the same) ===
-function convertMessagesToFalPrompt(messages) { /* ... */
+function convertMessagesToFalPrompt(messages) {
     let fixed_system_prompt_content = "";
     const conversation_message_blocks = [];
     // console.log(`Original messages count: ${messages.length}`); // Less verbose logging
@@ -347,48 +349,35 @@ async function tryFalCallWithFailover(operation, functionId, params) {
             // --- Configure fal client with the selected key for this attempt ---
             // WARNING: This global config change might have concurrency issues in high-load scenarios
             // if the fal client library doesn't isolate requests properly.
-            // A better approach would be per-request credentials if the library supported it.
             fal.config({ credentials: currentFalKey });
 
             if (operation === 'stream') {
-                // For streams, the retry logic primarily applies to *initiating* the stream.
-                // If the stream starts but fails later, this loop won't restart it.
                 const streamResult = await fal.stream(functionId, params);
                 console.log(`Successfully initiated stream with key ending in ...${currentFalKey.slice(-4)}`);
-                // If successful, return the stream iterator
                 return streamResult;
             } else { // 'subscribe' (non-stream)
                 const result = await fal.subscribe(functionId, params);
                 console.log(`Successfully completed subscribe request with key ending in ...${currentFalKey.slice(-4)}`);
 
-                // Check for application-level errors *returned* by fal within the result object
-                // These are usually model errors, not key errors. Let them propagate.
                 if (result && result.error) {
                     console.warn(`Fal-ai returned an application error (non-stream) with key ...${currentFalKey.slice(-4)}: ${JSON.stringify(result.error)}`);
-                    // Don't mark key as failed for application errors unless specifically known.
                 }
-                // Return the result object (which might contain an error)
                 return result;
             }
         } catch (error) {
             console.error(`Error using key ending in ...${currentFalKey.slice(-4)}:`, error.message || error);
-            lastError = error; // Store the error
+            lastError = error;
 
-            // Check if the error is likely related to the key itself
             if (isKeyRelatedError(error)) {
                 markKeyFailed(keyInfo);
                 console.log(`Key marked as failed. Trying next key if available...`);
-                // Continue the loop to try the next key
             } else {
-                // If the error is not key-related (e.g., network issue, fal internal error),
-                // stop retrying and propagate the error immediately.
                 console.error("Non-key related error occurred. Aborting retries.");
-                throw error; // Re-throw the error
+                throw error;
             }
         }
     }
 
-    // If the loop finishes, all keys were tried and failed with key-related errors.
    console.error("All FAL keys failed after attempting each one.");
    throw new Error(lastError ? `All FAL keys failed. Last error: ${lastError.message}` : "All FAL API keys failed.");
 }
@@ -402,7 +391,6 @@ app.post('/v1/chat/completions', async (req, res) => {
 
     if (!FAL_SUPPORTED_MODELS.includes(model)) {
         console.warn(`Warning: Requested model '${model}' is not in the explicitly supported list.`);
-        // Allow proceeding, maybe fal-ai/any-llm supports it dynamically
     }
     if (!model || !messages || !Array.isArray(messages) || messages.length === 0) {
         console.error("Invalid request parameters:", { model, messages: Array.isArray(messages) ? messages.length : typeof messages });
@@ -416,31 +404,24 @@ app.post('/v1/chat/completions', async (req, res) => {
         model: model,
         prompt: prompt,
         ...(system_prompt && { system_prompt: system_prompt }),
-        reasoning: !!reasoning, // Ensure boolean
-        // Spread any other OpenAI compatible params if needed, though fal might ignore them
-        // ...restOpenAIParams // Be careful with spreading unknown params
+        reasoning: !!reasoning,
     };
 
     console.log("Prepared Fal Input (lengths):", { system_prompt: system_prompt?.length, prompt: prompt?.length });
-    // Optional: Log full input for debugging (can be verbose)
-    // console.log("Full Fal Input:", JSON.stringify(falInput, null, 2));
 
-    // --- Use the failover wrapper for the Fal API call ---
     if (stream) {
         res.setHeader('Content-Type', 'text/event-stream; charset=utf-8');
         res.setHeader('Cache-Control', 'no-cache');
         res.setHeader('Connection', 'keep-alive');
-        res.setHeader('Access-Control-Allow-Origin', '*'); // Keep CORS header if needed
+        res.setHeader('Access-Control-Allow-Origin', '*');
        res.flushHeaders();
 
        let previousOutput = '';
        let falStream;
 
        try {
-            // --- Initiate stream using failover ---
            falStream = await tryFalCallWithFailover('stream', "fal-ai/any-llm", { input: falInput });
 
-            // --- Process the stream (existing logic) ---
            for await (const event of falStream) {
                const currentOutput = (event && typeof event.output === 'string') ? event.output : '';
                const isPartial = (event && typeof event.partial === 'boolean') ? event.partial : true;
@@ -448,11 +429,9 @@ app.post('/v1/chat/completions', async (req, res) => {
 
                if (errorInfo) {
                    console.error("Error received *during* fal stream:", errorInfo);
-                    // Note: This error happened *after* successful stream initiation.
-                    // We send an error chunk, but don't mark the key failed here as the connection worked initially.
                    const errorChunk = { id: `chatcmpl-${Date.now()}-error`, object: "chat.completion.chunk", created: Math.floor(Date.now() / 1000), model: model, choices: [{ index: 0, delta: {}, finish_reason: "error", message: { role: 'assistant', content: `Fal Stream Error: ${JSON.stringify(errorInfo)}` } }] };
                    res.write(`data: ${JSON.stringify(errorChunk)}\n\n`);
-                    break; // Stop processing this stream
+                    break;
                }
 
                let deltaContent = '';
@@ -461,11 +440,11 @@ app.post('/v1/chat/completions', async (req, res) => {
                } else if (currentOutput.length > 0) {
                    console.warn("Fal stream output mismatch detected. Sending full current output as delta.", { previousLength: previousOutput.length, currentLength: currentOutput.length });
                    deltaContent = currentOutput;
-                    previousOutput = ''; // Reset previous since we sent full
+                    previousOutput = '';
                }
-                previousOutput = currentOutput; // Update previousOutput for next iteration
+                previousOutput = currentOutput;
 
-                if (deltaContent || !isPartial) { // Send delta or final chunk
+                if (deltaContent || !isPartial) {
                    const openAIChunk = { id: `chatcmpl-${Date.now()}`, object: "chat.completion.chunk", created: Math.floor(Date.now() / 1000), model: model, choices: [{ index: 0, delta: { content: deltaContent }, finish_reason: isPartial === false ? "stop" : null }] };
                    res.write(`data: ${JSON.stringify(openAIChunk)}\n\n`);
                }
@@ -475,12 +454,9 @@ app.post('/v1/chat/completions', async (req, res) => {
            console.log("Stream finished successfully.");
 
        } catch (streamError) {
-            // This catch handles errors from tryFalCallWithFailover OR the stream processing loop
            console.error('Error during stream processing:', streamError);
-            // Don't try to write to response if headers already sent and stream failed mid-way uncleanly
            if (!res.writableEnded) {
                try {
-                    // Send a final error chunk if possible
                    const errorDetails = (streamError instanceof Error) ? streamError.message : JSON.stringify(streamError);
                    const finalErrorChunk = { error: { message: "Stream failed", type: "proxy_error", details: errorDetails } };
                    res.write(`data: ${JSON.stringify(finalErrorChunk)}\n\n`);
@@ -488,65 +464,57 @@ app.post('/v1/chat/completions', async (req, res) => {
                    res.end();
                } catch (finalError) {
                    console.error('Error sending final stream error message to client:', finalError);
-                    if (!res.writableEnded) { res.end(); } // Ensure response ends
+                    if (!res.writableEnded) { res.end(); }
                }
            }
        }
 
    } else { // Non-stream
        console.log("Executing non-stream request with failover...");
-        // --- Call subscribe using failover ---
        const result = await tryFalCallWithFailover('subscribe', "fal-ai/any-llm", { input: falInput, logs: true });
 
        console.log("Received non-stream result from fal-ai via failover wrapper.");
-        // Optional: Log full result for debugging
-        // console.log("Full non-stream result:", JSON.stringify(result, null, 2));
 
-        // Check for application-level errors *within* the successful response
        if (result && result.error) {
            console.error("Fal-ai returned an application error in non-stream mode (after successful API call):", result.error);
-            // Return a 500 status but format it like OpenAI error if possible
            return res.status(500).json({
                object: "error",
                message: `Fal-ai application error: ${JSON.stringify(result.error)}`,
                type: "fal_ai_error",
                param: null,
-                code: result.error.code || null // Include code if available
+                code: result.error.code || null
            });
        }
 
-        // --- Format successful non-stream response (existing logic) ---
        const openAIResponse = {
-            id: `chatcmpl-${result?.requestId || Date.now()}`, // Use requestId if available
+            id: `chatcmpl-${result?.requestId || Date.now()}`,
            object: "chat.completion",
            created: Math.floor(Date.now() / 1000),
-            model: model, // Use the requested model ID
+            model: model,
            choices: [{
                index: 0,
                message: {
                    role: "assistant",
-                    content: result?.output || "" // Safely access output
+                    content: result?.output || ""
                },
-                finish_reason: "stop" // Assume stop for non-stream
+                finish_reason: "stop"
            }],
-            usage: { // Fal doesn't provide token usage
+            usage: {
                prompt_tokens: null,
                completion_tokens: null,
                total_tokens: null
            },
-            system_fingerprint: null, // Not provided by fal
-            ...(result?.reasoning && { fal_reasoning: result.reasoning }), // Include reasoning if present
+            system_fingerprint: null,
+            ...(result?.reasoning && { fal_reasoning: result.reasoning }),
        };
        res.json(openAIResponse);
        console.log("Returned non-stream response successfully.");
    }
 
 } catch (error) {
-    // This catches errors from setup, convertMessagesToFalPrompt, or tryFalCallWithFailover (if all keys failed or non-key error occurred)
    console.error('Unhandled error in /v1/chat/completions:', error);
    if (!res.headersSent) {
        const errorMessage = (error instanceof Error) ? error.message : JSON.stringify(error);
-        // Provide a more informative error message
        const errorType = error.message?.includes("All FAL keys failed") ? "api_key_error" : "proxy_internal_error";
        res.status(500).json({
            error: {
@@ -557,7 +525,7 @@ app.post('/v1/chat/completions', async (req, res) => {
        });
    } else if (!res.writableEnded) {
        console.error("Headers already sent, attempting to end response after error.");
-        res.end(); // Try to end the response if possible
+        res.end();
    }
 }
 });
@@ -567,7 +535,8 @@ app.listen(PORT, () => {
    console.log(`===========================================================`);
    console.log(` Fal OpenAI Proxy Server (Multi-Key Failover)`);
    console.log(` Listening on port: ${PORT}`);
-    console.log(` Loaded ${falKeys.length} FAL API Key(s).`);
+    // *** Updated log message to reference FAL_KEY ***
+    console.log(` Loaded ${falKeys.length} FAL API Key(s) from FAL_KEY.`);
    console.log(` API Key Auth Enabled: ${API_KEY ? 'Yes' : 'No'}`);
    console.log(` Limits: System Prompt=${SYSTEM_PROMPT_LIMIT}, Prompt=${PROMPT_LIMIT}`);
    console.log(` Chat Completions: POST http://localhost:${PORT}/v1/chat/completions`);
@@ -575,7 +544,7 @@ app.listen(PORT, () => {
    console.log(`===========================================================`);
 });
 
-// Root path response (Remains the same)
+// Root path response
 app.get('/', (req, res) => {
    res.send('Fal OpenAI Proxy (Multi-Key Failover) is running.');
 });
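
For context, the failover loop above relies on key-management helpers (markKeyFailed and an isKeyRelatedError check, plus whatever selects the next key) that sit outside the hunks shown. Below is a minimal sketch of what round-robin key selection with a failure cooldown might look like; the names and object shapes here are assumptions for illustration, not the file's actual implementation.

// Hypothetical sketch of the key-rotation helpers the failover loop depends on;
// the real implementations live outside the hunks shown above.
const keys = (process.env.FAL_KEY || '')
    .split(',')
    .map((k) => k.trim())
    .filter(Boolean)
    .map((key) => ({ key, failedAt: 0 })); // assumed shape: key string + last-failure timestamp

const COOLDOWN_MS = 60 * 1000; // mirrors failedKeyCooldown in the diff
let cursor = 0;

function getNextAvailableKey() {
    for (let i = 0; i < keys.length; i++) {
        const info = keys[(cursor + i) % keys.length];
        if (Date.now() - info.failedAt > COOLDOWN_MS) { // skip keys still cooling down
            cursor = (cursor + i + 1) % keys.length;    // round-robin: resume after this key
            return info;
        }
    }
    return null; // every key is still cooling down
}

function markKeyFailed(info) {
    info.failedAt = Date.now(); // key becomes eligible again once the cooldown elapses
}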
 
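To exercise the updated proxy end to end, here is a small client sketch, assuming the server runs locally on its default port with FAL_KEY set to comma-separated keys and API_KEY set for proxy auth. The endpoints and Bearer auth scheme come from the code above; the model choice and prompt are illustrative.

// Hypothetical client sketch (Node 18+, built-in fetch, run as an ES module).
// Assumes the proxy was started with something like:
//   FAL_KEY="key1,key2,key3" API_KEY="my-proxy-key" node server.js
const BASE = 'http://localhost:3000'; // default PORT in server.js
const headers = {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer my-proxy-key', // checked by the apiKeyAuth middleware
};

// List the models the proxy advertises.
const models = await fetch(`${BASE}/v1/models`, { headers }).then((r) => r.json());
console.log(models.data?.map((m) => m.id));

// Non-stream chat completion; key failover happens transparently on the server.
const completion = await fetch(`${BASE}/v1/chat/completions`, {
    method: 'POST',
    headers,
    body: JSON.stringify({
        model: 'anthropic/claude-3.5-sonnet', // one of FAL_SUPPORTED_MODELS
        messages: [{ role: 'user', content: 'Hello!' }],
        stream: false,
    }),
}).then((r) => r.json());
console.log(completion.choices?.[0]?.message?.content);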