Calmlo committed on
Commit
5de0798
·
verified ·
1 Parent(s): 89e1f0c

Update server.js

Browse files
Files changed (1) hide show
  1. server.js +240 -533
server.js CHANGED
@@ -1,142 +1,57 @@
1
  import express from 'express';
2
- // Import the 'fal' object directly for configuration within the retry loop
3
  import { fal } from '@fal-ai/client';
4
 
5
- // --- Key Management Setup ---
6
- // Read comma-separated keys from the SINGLE environment variable FAL_KEY
7
// --- Key Management Bootstrap ---
// FAL_KEY holds a comma-separated list of Fal AI credentials; API_KEY is the
// bearer token clients must present to this proxy.
const FAL_KEY_STRING = process.env.FAL_KEY;
const API_KEY = process.env.API_KEY;

// Print the given error lines and abort startup with a non-zero exit code.
const abortStartup = (...lines) => {
    for (const line of lines) console.error(line);
    process.exit(1);
};

if (!FAL_KEY_STRING) {
    abortStartup(
        "ERROR: FAL_KEY environment variable is not set.",
        "Ensure FAL_KEY contains a comma-separated list of your Fal AI keys."
    );
}

// Split on commas, trim each entry, and drop blanks caused by stray commas.
const falKeys = FAL_KEY_STRING
    .split(',')
    .map((rawKey) => rawKey.trim())
    .filter((trimmedKey) => trimmedKey.length > 0);

if (falKeys.length === 0) {
    abortStartup(
        "ERROR: No valid Fal keys found in the FAL_KEY environment variable after parsing.",
        "Ensure FAL_KEY is a comma-separated list, e.g., 'key1,key2,key3'."
    );
}

if (!API_KEY) {
    abortStartup("ERROR: API_KEY environment variable is not set.");
}
// --- End Initial Checks ---

// Round-robin cursor into falKeys, plus the set of keys that failed
// persistently at runtime and should no longer be handed out.
let currentKeyIndex = 0;
const invalidKeys = new Set();

console.log(`Loaded ${falKeys.length} Fal AI Key(s) from the FAL_KEY environment variable.`);
41
-
42
/**
 * Returns the next usable Fal AI key in round-robin order, skipping any key
 * previously marked invalid.
 *
 * Advances the module-level cursor `currentKeyIndex` past every slot it
 * examines (valid or not), so consecutive calls rotate through the pool.
 *
 * @returns {{key: string, index: number}|null} The key and its index in
 *   `falKeys`, or null when every key has been marked invalid.
 */
function getNextValidKey() {
    // Fast path: every configured key is already known-bad.
    if (invalidKeys.size >= falKeys.length) {
        console.error("All Fal AI keys are marked as invalid.");
        return null;
    }

    // Examine at most falKeys.length slots starting at the cursor. This bound
    // replaces the old initialIndex/attempts early-break bookkeeping, which
    // merely duplicated the loop condition.
    for (let examined = 0; examined < falKeys.length; examined++) {
        const keyIndex = currentKeyIndex % falKeys.length;
        const key = falKeys[keyIndex];

        // Always advance the cursor, even over invalid keys.
        currentKeyIndex = (keyIndex + 1) % falKeys.length;

        if (!invalidKeys.has(key)) {
            console.log(`Using Fal Key index: ${keyIndex} (from FAL_KEY list)`);
            return { key, index: keyIndex };
        }
        console.log(`Skipping invalid Fal Key index: ${keyIndex}`);
    }

    console.error("Could not find a valid Fal AI key after checking all potentially available keys.");
    return null;
}
 
84
 
85
/**
 * Heuristically decides whether an error returned by Fal AI indicates a
 * problem with the API key itself (auth failure, quota/rate limit, locked
 * account) rather than a transient or request-specific failure.
 *
 * @param {Error|object|null} error - The error caught from the Fal client.
 * @returns {boolean} True when the error looks key-related.
 */
function isKeyRelatedError(error) {
    if (!error) return false; // null/undefined errors are never key-related

    const message = error.message?.toLowerCase() || '';
    // Fal errors surface the HTTP status under either property name.
    const status = error.status || error.statusCode;

    // 401 Unauthorized, 403 Forbidden, 429 Too Many Requests / quota.
    if (status === 401 || status === 403 || status === 429) {
        console.warn(`Detected potential key-related error (HTTP Status: ${status}).`);
        return true;
    }

    // Common error-message fragments observed from Fal (case-insensitive).
    const keyErrorPatterns = [
        'invalid api key', 'authentication failed', 'permission denied',
        'quota exceeded', 'forbidden', 'unauthorized', 'rate limit',
        'credentials', 'api key missing', 'invalid credential',
        'exhausted balance', 'user is locked'
    ];
    if (keyErrorPatterns.some(pattern => message.includes(pattern))) {
        console.warn(`Detected potential key-related error (message contains relevant pattern: "${message}")`);
        return true;
    }

    // BUGFIX: the previous `status === 403 && error.body?.detail` branch was
    // unreachable dead code — a 403 status already returned true above — so
    // it has been removed.
    return false;
}
130
- // --- End Key Management Setup ---
 
 
 
 
131
 
132
  const app = express();
133
- // Increase payload size limits if needed
134
  app.use(express.json({ limit: '50mb' }));
135
  app.use(express.urlencoded({ extended: true, limit: '50mb' }));
136
 
137
  const PORT = process.env.PORT || 3000;
138
 
139
- // API Key Authentication Middleware
140
/**
 * Express middleware enforcing bearer-token authentication against the
 * API_KEY environment variable. Responds 401 when the Authorization header is
 * missing, malformed, or carries the wrong key; otherwise hands off to next().
 * NOTE(review): the missing-header guard sits behind a diff hunk boundary in
 * the original — reconstructed here; confirm against the full file.
 */
const apiKeyAuth = (req, res, next) => {
    const authHeader = req.headers['authorization'];

    // No Authorization header at all.
    if (!authHeader) {
        return res.status(401).json({ error: 'Unauthorized: No API Key provided' });
    }

    // Header must be exactly "Bearer <key>".
    const parts = authHeader.split(' ');
    const wellFormed = parts.length === 2 && parts[0].toLowerCase() === 'bearer';
    if (!wellFormed) {
        console.warn('Unauthorized: Invalid Authorization header format. Expected "Bearer <key>".');
        return res.status(401).json({ error: 'Unauthorized: Invalid Authorization header format' });
    }

    if (parts[1] !== API_KEY) {
        console.warn('Unauthorized: Invalid API Key provided.');
        return res.status(401).json({ error: 'Unauthorized: Invalid API Key' });
    }

    // Authenticated: continue down the middleware chain.
    next();
};
164
 
165
- // Apply API Key Authentication to relevant endpoints
166
  app.use(['/v1/models', '/v1/chat/completions'], apiKeyAuth);
167
 
168
- // === Global Limits Definition ===
169
- const PROMPT_LIMIT = 4800; // Max length for the main 'prompt' field
170
- const SYSTEM_PROMPT_LIMIT = 4800; // Max length for the 'system_prompt' field
171
- // === End Limits Definition ===
172
 
173
- // Define the list of models supported by fal-ai/any-llm (Update as needed)
174
  const FAL_SUPPORTED_MODELS = [
175
  "anthropic/claude-3.7-sonnet",
176
  "anthropic/claude-3.5-sonnet",
@@ -189,27 +102,22 @@ const FAL_SUPPORTED_MODELS = [
189
  "deepseek/deepseek-r1",
190
  "meta-llama/llama-4-maverick",
191
  "meta-llama/llama-4-scout"
192
- // Add or remove models here
193
  ];
194
 
195
- // Helper function to extract the owner/organization from a model ID string
196
/**
 * Extracts the owner/organization prefix from a model ID such as
 * "anthropic/claude-3.5-sonnet". Falls back to 'fal-ai' when the ID is not a
 * slash-separated string.
 *
 * @param {*} modelId - Candidate model identifier (any type tolerated).
 * @returns {string} The owner segment, or 'fal-ai' as the default.
 */
const getOwner = (modelId) => {
    const isSlashedString = typeof modelId === 'string' && modelId.includes('/');
    return isSlashedString ? modelId.split('/')[0] : 'fal-ai';
}
203
 
204
- // GET /v1/models endpoint - Returns the list of supported models
205
  app.get('/v1/models', (req, res) => {
206
  console.log("Received request for GET /v1/models");
207
  try {
208
  const modelsData = FAL_SUPPORTED_MODELS.map(modelId => ({
209
- id: modelId,
210
- object: "model",
211
- created: Math.floor(Date.now() / 1000), // Use current timestamp
212
- owned_by: getOwner(modelId)
213
  }));
214
  res.json({ object: "list", data: modelsData });
215
  console.log("Successfully returned model list.");
@@ -219,483 +127,282 @@ app.get('/v1/models', (req, res) => {
219
  }
220
  });
221
 
222
-
223
/**
 * Converts an OpenAI-style messages array into the { system_prompt, prompt }
 * pair expected by fal-ai/any-llm.
 *
 * Strategy: all system messages are concatenated first (truncated to the
 * system limit); user/assistant turns are then packed most-recent-first into
 * the main prompt, with older turns overflowing into the unused tail of the
 * system prompt behind a separator.
 *
 * GENERALIZATION: the character limits are now parameters defaulting to the
 * module-level PROMPT_LIMIT / SYSTEM_PROMPT_LIMIT constants, so existing
 * call sites are unchanged while other limits become possible.
 *
 * @param {Array<object>} messages - Messages ({ role, content }); content may
 *   be null/undefined and is coerced to "".
 * @param {number} [promptLimit=PROMPT_LIMIT] - Max characters for `prompt`.
 * @param {number} [systemPromptLimit=SYSTEM_PROMPT_LIMIT] - Max characters
 *   for `system_prompt` (fixed system text plus overflowed history).
 * @returns {{system_prompt: string, prompt: string}}
 * @throws {Error} If `messages` is not an array, or internal processing fails.
 */
function convertMessagesToFalPrompt(messages, promptLimit = PROMPT_LIMIT, systemPromptLimit = SYSTEM_PROMPT_LIMIT) {
    // --- Input Validation ---
    if (!Array.isArray(messages)) {
        console.error("!!! ERROR in convertMessagesToFalPrompt: Input 'messages' is not an array.");
        throw new Error("Invalid input: 'messages' must be an array.");
    }
    if (messages.length === 0) {
        console.warn("Warning in convertMessagesToFalPrompt: Input 'messages' array is empty.");
        return { system_prompt: "", prompt: "" };
    }

    try {
        let fixed_system_prompt_content = "";
        const conversation_message_blocks = [];

        // 1. Separate system text from user/assistant turns.
        for (const message of messages) {
            if (!message || typeof message !== 'object' || typeof message.role !== 'string') {
                console.warn(`--> Skipping invalid message object in convertMessagesToFalPrompt: ${JSON.stringify(message)}`);
                continue;
            }
            // Coerce null/undefined/non-string content to a safe string.
            const content = (message.content === null || message.content === undefined) ? "" : String(message.content);
            switch (message.role) {
                case 'system':
                    fixed_system_prompt_content += `System: ${content}\n\n`;
                    break;
                case 'user':
                    conversation_message_blocks.push(`Human: ${content}\n\n`);
                    break;
                case 'assistant':
                    conversation_message_blocks.push(`Assistant: ${content}\n\n`);
                    break;
                default:
                    console.warn(`--> Unsupported role encountered in convertMessagesToFalPrompt: '${message.role}'. Skipping message.`);
            }
        }

        // 2. Truncate combined system text to the limit.
        if (fixed_system_prompt_content.length > systemPromptLimit) {
            const originalLength = fixed_system_prompt_content.length;
            fixed_system_prompt_content = fixed_system_prompt_content.substring(0, systemPromptLimit);
            console.warn(`Combined system messages truncated from ${originalLength} to ${systemPromptLimit} characters.`);
        }
        fixed_system_prompt_content = fixed_system_prompt_content.trim();

        // 3. Space left in system_prompt for overflowed history
        //    (+4 approximates separator spacing around the fixed text).
        const space_occupied_by_fixed_system = fixed_system_prompt_content.length > 0
            ? fixed_system_prompt_content.length + 4
            : 0;
        const remaining_system_limit = Math.max(0, systemPromptLimit - space_occupied_by_fixed_system);

        // 4. Pack history newest-first: main prompt first, then system overflow.
        const prompt_history_blocks = [];
        const system_prompt_history_blocks = [];
        let current_prompt_length = 0;
        let current_system_history_length = 0;
        let promptFull = promptLimit <= 0;
        let systemHistoryFull = remaining_system_limit <= 0;

        for (let i = conversation_message_blocks.length - 1; i >= 0; i--) {
            const message_block = conversation_message_blocks[i];
            const block_length = (typeof message_block === 'string') ? message_block.length : 0;

            if (block_length === 0) continue;
            if (promptFull && systemHistoryFull) break;

            // Try fitting into the main 'prompt' first.
            if (!promptFull) {
                if (current_prompt_length + block_length <= promptLimit) {
                    prompt_history_blocks.unshift(message_block);
                    current_prompt_length += block_length;
                    continue;
                }
                promptFull = true;
            }

            // Otherwise spill into the system_prompt's remaining space.
            if (!systemHistoryFull) {
                if (current_system_history_length + block_length <= remaining_system_limit) {
                    system_prompt_history_blocks.unshift(message_block);
                    current_system_history_length += block_length;
                    continue;
                }
                systemHistoryFull = true;
            }
        }

        // 5. Assemble final strings; fixed system text always comes first.
        const system_prompt_history_content = system_prompt_history_blocks.join('').trim();
        const final_prompt = prompt_history_blocks.join('').trim();
        const SEPARATOR = "\n\n------- Earlier Conversation History -------\n\n";

        let final_system_prompt = "";
        const hasFixedSystem = fixed_system_prompt_content.length > 0;
        const hasSystemHistory = system_prompt_history_content.length > 0;
        if (hasFixedSystem && hasSystemHistory) {
            final_system_prompt = fixed_system_prompt_content + SEPARATOR + system_prompt_history_content;
        } else if (hasFixedSystem) {
            final_system_prompt = fixed_system_prompt_content;
        } else if (hasSystemHistory) {
            final_system_prompt = system_prompt_history_content;
        }

        // 6. Always return a well-formed result object.
        return { system_prompt: final_system_prompt, prompt: final_prompt };
    } catch (internalError) {
        console.error("!!! CRITICAL ERROR inside convertMessagesToFalPrompt processing:", internalError);
        console.error("!!! Failing messages input was:", JSON.stringify(messages, null, 2));
        // Re-throw so the route handler's outer try/catch reports the failure.
        throw new Error(`Failed to process messages internally: ${internalError.message}`);
    }
}
367
- // === End convertMessagesToFalPrompt function ===
368
-
369
-
370
/**
 * Calls fal-ai/any-llm with automatic key rotation: on a key-related failure
 * the key is marked invalid and the next key is tried, with at most one
 * attempt per configured key.
 *
 * NOTE: this reconfigures the GLOBAL fal client per attempt; concurrent
 * requests may race on credentials.
 *
 * @param {object} falInput - Input payload for the Fal AI call.
 * @param {boolean} [stream=false] - Whether to make a streaming request.
 * @returns {Promise<object|{stream: AsyncIterable<object>, keyUsed: string, indexUsed: number}>}
 *   The subscribe result (non-stream) or { stream, keyUsed, indexUsed }.
 * @throws {Error} When all keys fail with key-related errors, or immediately
 *   on the first non-key-related initiation error.
 */
async function makeFalRequestWithRetry(falInput, stream = false) {
    let attempts = 0;
    const maxAttempts = falKeys.length;
    const attemptedKeysInThisRequest = new Set();

    while (attempts < maxAttempts) {
        const keyInfo = getNextValidKey();
        if (!keyInfo) {
            console.error("makeFalRequestWithRetry: No valid Fal AI keys remaining.");
            throw new Error("No valid Fal AI keys available (all marked as invalid).");
        }
        // BUGFIX: the old code `continue`d here without incrementing
        // `attempts`, which could spin forever when the key pool contains
        // duplicate key strings. If the rotation hands back a key we already
        // tried, every distinct valid key has been exhausted for this request.
        if (attemptedKeysInThisRequest.has(keyInfo.key)) {
            console.warn(`Key at index ${keyInfo.index} was already attempted for this request; no untried keys remain.`);
            break;
        }
        attemptedKeysInThisRequest.add(keyInfo.key);
        attempts++;

        try {
            console.log(`Attempt ${attempts}/${maxAttempts}: Trying Fal Key index ${keyInfo.index}...`);
            console.warn(`Configuring GLOBAL fal client with key index ${keyInfo.index}. Review concurrency implications.`);
            fal.config({ credentials: keyInfo.key });

            if (stream) {
                const falStream = await fal.stream("fal-ai/any-llm", { input: falInput });
                console.log(`Successfully initiated stream with key index ${keyInfo.index}.`);
                // The caller needs the key info to invalidate it if the
                // stream fails later, after initiation succeeded.
                return { stream: falStream, keyUsed: keyInfo.key, indexUsed: keyInfo.index };
            }

            // Non-stream path.
            console.log(`Executing non-stream request with key index ${keyInfo.index}...`);
            const result = await fal.subscribe("fal-ai/any-llm", { input: falInput, logs: true });
            console.log(`Successfully received non-stream result with key index ${keyInfo.index}.`);

            // Fal can report failures inside an otherwise-successful payload.
            if (result && result.error) {
                console.error(`Fal AI returned an error object within the non-stream result payload (Key Index ${keyInfo.index}):`, result.error);
                if (isKeyRelatedError(result.error)) {
                    console.warn(`Marking Fal Key index ${keyInfo.index} as invalid due to error in response payload.`);
                    invalidKeys.add(keyInfo.key);
                    continue; // rotate to the next key
                }
                throw new Error(`Fal AI error reported in result payload: ${JSON.stringify(result.error)}`);
            }
            return result;
        } catch (error) {
            // Errors thrown while *initiating* the request.
            console.error(`Error caught during request initiation using Fal Key index ${keyInfo.index}:`, error.message || error);
            if (isKeyRelatedError(error)) {
                console.warn(`Marking Fal Key index ${keyInfo.index} as invalid due to caught initiation error.`);
                invalidKeys.add(keyInfo.key);
                console.log(`--> Invalid Keys Set now contains: [${Array.from(invalidKeys).join(', ')}]`);
                // Fall through and try the next key.
            } else {
                console.error("Initiation error does not appear to be key-related. Failing request without further key retries.");
                throw error; // non-key failures are not retried
            }
        }
    }

    // Loop exhausted: every unique key failed during initiation.
    throw new Error(`Request initiation failed after trying ${attempts} unique Fal key(s). All failed with key-related errors or were already marked invalid.`);
}
 
442
 
443
 
444
- // POST /v1/chat/completions endpoint - Handles chat requests
445
  app.post('/v1/chat/completions', async (req, res) => {
446
  const { model, messages, stream = false, reasoning = false, ...restOpenAIParams } = req.body;
447
- console.log(`--> POST /v1/chat/completions | Model: ${model} | Stream: ${stream}`);
448
 
449
- // --- Input validation for model and messages ---
450
- if (!model || !messages || !Array.isArray(messages) || messages.length === 0) {
451
- console.error("Invalid request: Missing 'model' or 'messages' array is empty/invalid.");
452
- return res.status(400).json({ error: 'Bad Request: `model` and a non-empty `messages` array are required.' });
453
- }
454
  if (!FAL_SUPPORTED_MODELS.includes(model)) {
455
- console.warn(`Warning: Requested model '${model}' is not in the explicitly supported list. Proxy will still attempt the request.`);
 
 
 
 
456
  }
457
- // --- End Input Validation ---
458
-
459
- let keyUsedForRequest = null; // Variable to store the key used if initiation succeeds
460
- let indexUsedForRequest = null;
461
 
462
  try {
463
- // --- Prepare Fal AI Input ---
464
- // This might throw an error if convertMessagesToFalPrompt fails internally
 
 
 
 
 
465
  const { prompt, system_prompt } = convertMessagesToFalPrompt(messages);
466
 
467
  const falInput = {
468
  model: model,
469
  prompt: prompt,
 
470
  reasoning: !!reasoning,
471
  };
472
- if (system_prompt && system_prompt.length > 0) {
473
- falInput.system_prompt = system_prompt;
474
- }
475
- // --- End Prepare Input ---
476
-
477
- console.log("Attempting Fal request with key rotation/retry logic...");
478
- console.log(`Prepared Input Lengths - System Prompt: ${system_prompt?.length || 0}, Prompt: ${prompt?.length || 0}`);
479
-
480
- // --- Handle Stream vs Non-Stream ---
 
 
 
 
481
  if (stream) {
482
- // Set headers for Server-Sent Events (SSE)
483
  res.setHeader('Content-Type', 'text/event-stream; charset=utf-8');
484
  res.setHeader('Cache-Control', 'no-cache');
485
  res.setHeader('Connection', 'keep-alive');
486
- res.setHeader('Access-Control-Allow-Origin', '*'); // Adjust CORS for production if needed
487
- res.flushHeaders(); // Send headers immediately
 
 
488
 
489
- let previousOutput = ''; // Track previous output for delta calculation
490
- let streamResult; // To hold the object { stream, keyUsed, indexUsed }
491
 
492
  try {
493
- // **Initiate the stream using the retry helper**
494
- // This can throw if initiation fails after all retries
495
- streamResult = await makeFalRequestWithRetry(falInput, true);
496
- const falStream = streamResult.stream;
497
- keyUsedForRequest = streamResult.keyUsed; // Store the key used for this stream
498
- indexUsedForRequest = streamResult.indexUsed;
499
-
500
- // Process the stream events asynchronously
501
  for await (const event of falStream) {
502
- // Safely extract data from the event
503
  const currentOutput = (event && typeof event.output === 'string') ? event.output : '';
504
  const isPartial = (event && typeof event.partial === 'boolean') ? event.partial : true;
505
  const errorInfo = (event && event.error) ? event.error : null;
506
 
507
- // Handle errors reported *within* a stream event payload
508
  if (errorInfo) {
509
- console.error("Error received *within* fal stream event payload:", errorInfo);
510
- const errorChunk = {
511
- id: `chatcmpl-${Date.now()}-error`, object: "chat.completion.chunk", created: Math.floor(Date.now() / 1000), model: model,
512
- choices: [{ index: 0, delta: {}, finish_reason: "error", message: { role: 'assistant', content: `Fal Stream Event Error: ${JSON.stringify(errorInfo)}` } }]
513
- };
514
- // Check write status before sending
515
- if (!res.writableEnded) { res.write(`data: ${JSON.stringify(errorChunk)}\n\n`); }
516
- else { console.warn("Stream ended before writing event error."); }
517
  }
518
 
519
- // Calculate the delta (new content)
520
  let deltaContent = '';
521
  if (currentOutput.startsWith(previousOutput)) {
522
  deltaContent = currentOutput.substring(previousOutput.length);
523
  } else if (currentOutput.length > 0) {
524
- console.warn("Fal stream output mismatch/reset. Sending full current output as delta.");
525
- deltaContent = currentOutput;
526
- previousOutput = '';
527
  }
528
  previousOutput = currentOutput;
529
 
530
- // Send OpenAI-compatible SSE chunk
531
  if (deltaContent || !isPartial) {
532
- const openAIChunk = {
533
- id: `chatcmpl-${Date.now()}`,
534
- object: "chat.completion.chunk",
535
- created: Math.floor(Date.now() / 1000),
536
- model: model,
537
- choices: [{
538
- index: 0,
539
- delta: { content: deltaContent },
540
- finish_reason: isPartial === false ? "stop" : null
541
- }]
542
- };
543
- // Check write status before sending
544
- if (!res.writableEnded) { res.write(`data: ${JSON.stringify(openAIChunk)}\n\n`); }
545
- else { console.warn("Stream ended before writing data chunk."); }
546
  }
547
- } // End for-await loop
548
-
549
- // Send the final [DONE] marker
550
  if (!res.writableEnded) {
551
  res.write(`data: [DONE]\n\n`);
552
- res.end(); // Close the connection
553
- console.log("<-- Stream finished successfully and [DONE] sent.");
554
- } else {
555
- console.log("<-- Stream processing finished, but connection was already ended before [DONE].");
556
  }
557
 
558
- // ==================================================
559
- // START: UPDATED CATCH BLOCK FOR STREAM HANDLING
560
- // ==================================================
561
  } catch (streamError) {
562
- // **Catch block for errors during stream processing OR initiation failure**
563
- console.error('Error during stream request processing/initiation:', streamError.message || streamError);
564
- // **Log the specific error object for more details**
565
- console.error('Full streamError object:', streamError);
566
-
567
- // **Check if the error is key-related AND if initiation succeeded**
568
- // This ensures we only invalidate the key if the error happened *during* processing
569
- // using a key that successfully initiated the stream.
570
- if (keyUsedForRequest && isKeyRelatedError(streamError)) {
571
- console.warn(`--> Marking Fal Key index ${indexUsedForRequest} as invalid due to error during stream processing.`);
572
- invalidKeys.add(keyUsedForRequest);
573
- console.log(`--> Invalid Keys Set now contains: [${Array.from(invalidKeys).map(k => k.substring(0, 5) + '...').join(', ')}]`); // Log obfuscated keys in set
 
574
  }
575
- // else: Error was non-key-related or happened during initiation (already handled/logged in makeFalRequestWithRetry).
576
-
577
- // --- Safely Report error back to the client ---
578
- try {
579
- // **Check headersSent first for initiation errors**
580
- if (!res.headersSent) {
581
- console.log("<-- Attempting to send 502 error response (headers not sent).");
582
- const errorMessage = (streamError instanceof Error) ? streamError.message : JSON.stringify(streamError);
583
- // Avoid sending huge objects if stringify fails or is too large
584
- const detail = String(errorMessage).length < 1000 ? errorMessage : "Error details too large or complex, check server logs.";
585
- res.status(502).json({ error: 'Failed to initiate Fal stream', details: detail });
586
- console.log("<-- Stream initiation failed response sent (502).");
587
- }
588
- // **Check writableEnded for errors during processing**
589
- else if (!res.writableEnded) {
590
- console.log("<-- Attempting to send error within stream (headers sent, not ended).");
591
- const errorDetails = (streamError instanceof Error) ? streamError.message : JSON.stringify(streamError);
592
- const detail = String(errorDetails).length < 1000 ? errorDetails : "Error details too large or complex, check server logs.";
593
- // Send an error object in the SSE stream format
594
- res.write(`data: ${JSON.stringify({ error: { message: "Stream processing error after initiation", type: "proxy_error", details: detail } })}\n\n`);
595
- // Still attempt to send [DONE] for robust client handling
596
- res.write(`data: [DONE]\n\n`);
597
- res.end(); // Explicitly end the response
598
- console.log("<-- Stream error sent within stream, stream ended.");
599
- } else {
600
- // Stream already ended, just log server-side.
601
- console.log("<-- Stream error occurred, but connection was already ended. Cannot send error to client.");
602
- }
603
- } catch (finalError) {
604
- // Error trying to send the error message itself (e.g., network gone)
605
- console.error('!!! Error sending stream error message to client:', finalError);
606
- // Attempt to end the response if it wasn't already
607
- if (!res.writableEnded) {
608
- console.log("<-- Forcefully ending response after error during error reporting.");
609
- res.end();
610
- }
611
- }
612
- // --- End error reporting ---
613
  }
614
- // ==================================================
615
- // END: UPDATED CATCH BLOCK FOR STREAM HANDLING
616
- // ==================================================
617
-
618
  } else {
619
- // --- Non-Stream Request ---
620
- try {
621
- // Get the result using the retry helper
622
- const result = await makeFalRequestWithRetry(falInput, false);
623
-
624
- // Construct OpenAI compatible response
625
- const openAIResponse = {
626
- id: `chatcmpl-${result.requestId || Date.now()}`,
627
- object: "chat.completion",
628
- created: Math.floor(Date.now() / 1000),
629
- model: model,
630
- choices: [{
631
- index: 0,
632
- message: {
633
- role: "assistant",
634
- content: result.output || "" // Ensure content is string
635
- },
636
- finish_reason: "stop"
637
- }],
638
- usage: { prompt_tokens: null, completion_tokens: null, total_tokens: null },
639
- system_fingerprint: null,
640
- ...(result.reasoning && { fal_reasoning: result.reasoning }),
641
- };
642
-
643
- res.json(openAIResponse);
644
- console.log("<-- Non-stream response sent successfully.");
645
-
646
- } catch (error) {
647
- // Catches errors from makeFalRequestWithRetry (e.g., all keys failed or non-key error)
648
- console.error('Error during non-stream request processing:', error.message || error);
649
- if (!res.headersSent) {
650
- const errorMessage = (error instanceof Error) ? error.message : JSON.stringify(error);
651
- const finalMessage = errorMessage.includes("No valid Fal AI keys available") || errorMessage.includes("Request failed after trying")
652
- ? `Fal request failed: ${errorMessage}`
653
- : `Internal Server Error processing Fal request: ${errorMessage}`;
654
- res.status(502).json({ error: 'Fal Request Failed', details: finalMessage });
655
- console.log("<-- Non-stream error response sent (502).");
656
- } else {
657
- console.error("Headers already sent for non-stream error response? This is unexpected.");
658
- if (!res.writableEnded) { res.end(); }
659
- }
660
  }
661
- } // --- End Stream/Non-Stream Logic ---
 
 
 
 
 
 
 
 
 
662
 
663
  } catch (error) {
664
- // **Catch block for errors BEFORE Fal request attempt**
665
- // (e.g., errors from convertMessagesToFalPrompt, JSON parsing errors)
666
- console.error('Unhandled error before initiating Fal request (likely setup or input conversion):', error.message || error);
667
  if (!res.headersSent) {
668
  const errorMessage = (error instanceof Error) ? error.message : JSON.stringify(error);
669
- // Use 500 Internal Server Error for issues within the proxy itself during setup
670
- res.status(500).json({ error: 'Internal Server Error in Proxy Setup', details: errorMessage });
671
- console.log("<-- Proxy setup error response sent (500).");
672
- } else {
673
- console.error("Headers already sent when catching setup error. Ending response.");
674
- if (!res.writableEnded) { res.end(); }
675
  }
676
  }
677
  });
678
 
679
- // Start the Express server
680
  app.listen(PORT, () => {
681
- console.log(`=====================================================================`);
682
- console.log(` Fal OpenAI Proxy Server (Multi-Key Rotation & Failover)`);
683
- console.log(`---------------------------------------------------------------------`);
684
- console.log(` Listening on port : ${PORT}`);
685
- console.log(` Reading Fal Keys from : FAL_KEY environment variable (comma-separated)`);
686
- console.log(` Loaded Keys Count : ${falKeys.length}`);
687
- console.log(` Invalid Keys Set : Initialized (size: ${invalidKeys.size})`);
688
- console.log(` Proxy API Key Auth : ${API_KEY ? 'Enabled (using API_KEY env var)' : 'DISABLED'}`);
689
- console.log(` Input Limits : System Prompt=${SYSTEM_PROMPT_LIMIT}, Prompt=${PROMPT_LIMIT}`);
690
- console.log(` Concurrency Warning : Global Fal client reconfigured per request attempt!`);
691
- console.log(`---------------------------------------------------------------------`);
692
- console.log(` Endpoints Available:`);
693
- console.log(` POST http://localhost:${PORT}/v1/chat/completions`);
694
- console.log(` GET http://localhost:${PORT}/v1/models`);
695
- console.log(`=====================================================================`);
696
  });
697
 
698
- // Root path handler for basic health check / info
699
  app.get('/', (req, res) => {
700
- res.send(`Fal OpenAI Proxy (Multi-Key Rotation/Failover from FAL_KEY) is running. Loaded ${falKeys.length} key(s). Currently ${invalidKeys.size} key(s) marked as invalid.`);
701
  });
 
1
  import express from 'express';
 
2
  import { fal } from '@fal-ai/client';
3
 
4
// --- Key Management ---
const FAL_KEY_STRING = process.env.FAL_KEY;
const API_KEY = process.env.API_KEY; // custom proxy-auth key, read as-is

// Abort startup when no Fal credentials were provided at all.
if (!FAL_KEY_STRING) {
    console.error("Error: FAL_KEY environment variable is not set.");
    process.exit(1);
}

// FAL_KEY is a comma-separated list; normalize it into an array of
// non-empty, whitespace-trimmed keys.
const falKeys = FAL_KEY_STRING
    .split(',')
    .map((rawKey) => rawKey.trim())
    .filter(Boolean);

// Abort startup when parsing left us with zero usable keys.
if (falKeys.length === 0) {
    console.error("Error: FAL_KEY environment variable is set, but no valid keys were found after parsing.");
    process.exit(1);
}

console.log(`Loaded ${falKeys.length} Fal AI Keys.`);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
// Round-robin cursor into falKeys; advances on every call to getNextFalKey.
let currentFalKeyIndex = 0;

/**
 * Returns the next Fal AI key in round-robin order over `falKeys`,
 * logging the index that was handed out so rotation can be traced.
 *
 * @returns {string} the selected Fal AI credential string
 */
function getNextFalKey() {
    const usedIndex = currentFalKeyIndex;
    // Wrap back to the first key after the last one.
    currentFalKeyIndex = (currentFalKeyIndex + 1) % falKeys.length;
    console.log(`Using Fal Key at index: ${usedIndex}`);
    return falKeys[usedIndex];
}
// --- End Key Management ---
36
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
 
38
// Fail fast when the proxy's own Bearer-auth key is missing.
if (!API_KEY) {
    console.error("Error: API_KEY environment variable is not set.");
    process.exit(1);
}

// NOTE: fal.config is deliberately NOT configured globally here anymore;
// a rotated key is applied per request inside the chat-completions handler.
// fal.config({
//     credentials: FAL_KEY, // removed
// });

const app = express();
// Generous body limits: requests can carry long conversation histories.
app.use(express.json({ limit: '50mb' }));
app.use(express.urlencoded({ extended: true, limit: '50mb' }));

const PORT = process.env.PORT || 3000;
53
 
54
+ // API Key 鉴权中间件 (保持不变)
55
  const apiKeyAuth = (req, res, next) => {
56
  const authHeader = req.headers['authorization'];
57
 
 
60
  return res.status(401).json({ error: 'Unauthorized: No API Key provided' });
61
  }
62
 
 
63
  const authParts = authHeader.split(' ');
64
  if (authParts.length !== 2 || authParts[0].toLowerCase() !== 'bearer') {
65
+ console.warn('Unauthorized: Invalid Authorization header format');
66
  return res.status(401).json({ error: 'Unauthorized: Invalid Authorization header format' });
67
  }
68
 
69
  const providedKey = authParts[1];
70
  if (providedKey !== API_KEY) {
71
+ console.warn('Unauthorized: Invalid API Key');
72
  return res.status(401).json({ error: 'Unauthorized: Invalid API Key' });
73
  }
74
 
 
75
  next();
76
  };
77
 
78
// Require the proxy API key (Bearer auth) on all OpenAI-compatible routes.
app.use(['/v1/models', '/v1/chat/completions'], apiKeyAuth);

// === Global input-size limits (measured in characters, not tokens) ===
const PROMPT_LIMIT = 4800;
const SYSTEM_PROMPT_LIMIT = 4800;
// === End limit definitions ===
85
 
86
+ // 定义 fal-ai/any-llm 支持的模型列表 (保持不变)
87
  const FAL_SUPPORTED_MODELS = [
88
  "anthropic/claude-3.7-sonnet",
89
  "anthropic/claude-3.5-sonnet",
 
102
  "deepseek/deepseek-r1",
103
  "meta-llama/llama-4-maverick",
104
  "meta-llama/llama-4-scout"
 
105
  ];
106
 
107
/**
 * Derives the "owned_by" organization for a model ID.
 * e.g. "anthropic/claude-3.5-sonnet" -> "anthropic"; IDs without a
 * slash (or falsy IDs) fall back to "fal-ai".
 *
 * @param {string} modelId - model identifier, possibly "org/name" shaped
 * @returns {string} the organization prefix, or "fal-ai"
 */
const getOwner = (modelId) => {
    const slashAt = modelId ? modelId.indexOf('/') : -1;
    return slashAt > -1 ? modelId.slice(0, slashAt) : 'fal-ai';
}
114
 
115
+ // GET /v1/models endpoint (保持不变)
116
  app.get('/v1/models', (req, res) => {
117
  console.log("Received request for GET /v1/models");
118
  try {
119
  const modelsData = FAL_SUPPORTED_MODELS.map(modelId => ({
120
+ id: modelId, object: "model", created: 1700000000, owned_by: getOwner(modelId)
 
 
 
121
  }));
122
  res.json({ object: "list", data: modelsData });
123
  console.log("Successfully returned model list.");
 
127
  }
128
  });
129
 
130
// === convertMessagesToFalPrompt ===
/**
 * Converts an OpenAI-style messages array into the { system_prompt, prompt }
 * pair expected by fal-ai/any-llm.
 *
 * Strategy:
 *   1. All system messages are concatenated (in order) at the top of the
 *      system prompt, truncated to systemPromptLimit characters.
 *   2. User/assistant turns are filled most-recent-first into the prompt
 *      slot (up to promptLimit); once that slot is full, older turns
 *      overflow into the unused tail of the system slot, separated from
 *      the fixed system text by SEPARATOR. Turns that fit in neither slot
 *      are dropped, oldest first.
 *
 * @param {Array<{role: string, content: *}>} messages - OpenAI chat messages.
 * @param {number} [promptLimit=PROMPT_LIMIT] - max characters for the prompt slot.
 * @param {number} [systemPromptLimit=SYSTEM_PROMPT_LIMIT] - max characters for the system slot.
 * @returns {{system_prompt: string, prompt: string}}
 */
function convertMessagesToFalPrompt(messages, promptLimit = PROMPT_LIMIT, systemPromptLimit = SYSTEM_PROMPT_LIMIT) {
    // Separator inserted between the fixed system text and overflowed history.
    const SEPARATOR = "\n\n-------下面是比较早之前的对话内容-----\n\n";

    let fixed_system_prompt_content = "";
    const conversation_message_blocks = [];
    console.log(`Original messages count: ${messages.length}`);

    // 1. Split out system messages; format user/assistant turns.
    for (const message of messages) {
        let content = (message.content === null || message.content === undefined) ? "" : String(message.content);
        switch (message.role) {
            case 'system':
                fixed_system_prompt_content += `System: ${content}\n\n`;
                break;
            case 'user':
                conversation_message_blocks.push(`Human: ${content}\n\n`);
                break;
            case 'assistant':
                conversation_message_blocks.push(`Assistant: ${content}\n\n`);
                break;
            default:
                console.warn(`Unsupported role: ${message.role}`);
                continue;
        }
    }

    // 2. Truncate the combined system text if it exceeds the limit.
    if (fixed_system_prompt_content.length > systemPromptLimit) {
        const originalLength = fixed_system_prompt_content.length;
        fixed_system_prompt_content = fixed_system_prompt_content.substring(0, systemPromptLimit);
        console.warn(`Combined system messages truncated from ${originalLength} to ${systemPromptLimit}`);
    }
    // Trim trailing whitespace so the later joins and length checks are predictable.
    fixed_system_prompt_content = fixed_system_prompt_content.trim();

    // 3. Work out how much of the system slot remains for overflowed history.
    // BUGFIX: reserve the real separator length. Previously a hard-coded `+ 4`
    // was reserved while the separator actually inserted in step 5 is
    // SEPARATOR.length (29) characters, so the assembled system prompt could
    // exceed systemPromptLimit.
    let space_occupied_by_fixed_system = 0;
    if (fixed_system_prompt_content.length > 0) {
        space_occupied_by_fixed_system = fixed_system_prompt_content.length + SEPARATOR.length;
    }
    const remaining_system_limit = Math.max(0, systemPromptLimit - space_occupied_by_fixed_system);
    console.log(`Trimmed fixed system prompt length: ${fixed_system_prompt_content.length}. Approx remaining system history limit: ${remaining_system_limit}`);

    // 4. Fill history newest-first: prompt slot first, then system overflow.
    const prompt_history_blocks = [];
    const system_prompt_history_blocks = [];
    let current_prompt_length = 0;
    let current_system_history_length = 0;
    let promptFull = false;
    let systemHistoryFull = (remaining_system_limit <= 0);

    console.log(`Processing ${conversation_message_blocks.length} user/assistant messages for recency filling.`);
    for (let i = conversation_message_blocks.length - 1; i >= 0; i--) {
        const message_block = conversation_message_blocks[i];
        const block_length = message_block.length;

        if (promptFull && systemHistoryFull) {
            console.log(`Both prompt and system history slots full. Omitting older messages from index ${i}.`);
            break;
        }

        // Prefer the prompt slot while it has room.
        if (!promptFull) {
            if (current_prompt_length + block_length <= promptLimit) {
                prompt_history_blocks.unshift(message_block);
                current_prompt_length += block_length;
                continue;
            } else {
                promptFull = true;
                console.log(`Prompt limit (${promptLimit}) reached. Trying system history slot.`);
            }
        }

        // Overflow into the unused tail of the system slot.
        if (!systemHistoryFull) {
            if (current_system_history_length + block_length <= remaining_system_limit) {
                system_prompt_history_blocks.unshift(message_block);
                current_system_history_length += block_length;
                continue;
            } else {
                systemHistoryFull = true;
                console.log(`System history limit (${remaining_system_limit}) reached.`);
            }
        }
    }

    // 5. Assemble the final system_prompt and prompt (with separator logic).
    const system_prompt_history_content = system_prompt_history_blocks.join('').trim();
    const final_prompt = prompt_history_blocks.join('').trim();

    let final_system_prompt = "";
    const hasFixedSystem = fixed_system_prompt_content.length > 0;
    const hasSystemHistory = system_prompt_history_content.length > 0;

    if (hasFixedSystem && hasSystemHistory) {
        final_system_prompt = fixed_system_prompt_content + SEPARATOR + system_prompt_history_content;
        console.log("Combining fixed system prompt and history with separator.");
    } else if (hasFixedSystem) {
        final_system_prompt = fixed_system_prompt_content;
        console.log("Using only fixed system prompt.");
    } else if (hasSystemHistory) {
        final_system_prompt = system_prompt_history_content;
        console.log("Using only history in system prompt slot.");
    }

    const result = {
        system_prompt: final_system_prompt,
        prompt: final_prompt
    };

    console.log(`Final system_prompt length (Sys+Separator+Hist): ${result.system_prompt.length}`);
    console.log(`Final prompt length (Hist): ${result.prompt.length}`);

    return result;
}
// === end convertMessagesToFalPrompt ===
251
 
252
 
253
// POST /v1/chat/completions — OpenAI-compatible endpoint proxied to fal-ai/any-llm.
// Supports both SSE streaming (stream: true) and one-shot JSON responses.
app.post('/v1/chat/completions', async (req, res) => {
    // restOpenAIParams is captured but currently unused (extra OpenAI params are dropped).
    const { model, messages, stream = false, reasoning = false, ...restOpenAIParams } = req.body;

    console.log(`Received chat completion request for model: ${model}, stream: ${stream}`);

    // Unknown models are only warned about, not rejected — the request is still forwarded.
    if (!FAL_SUPPORTED_MODELS.includes(model)) {
        console.warn(`Warning: Requested model '${model}' is not in the explicitly supported list.`);
    }
    if (!model || !messages || !Array.isArray(messages) || messages.length === 0) {
        console.error("Invalid request parameters:", { model, messages: Array.isArray(messages) ? messages.length : typeof messages });
        return res.status(400).json({ error: 'Missing or invalid parameters: model and messages array are required.' });
    }

    try {
        // Pick the next rotated Fal key and configure the client for this request.
        // NOTE(review): fal.config mutates the single shared `fal` client, so
        // concurrent in-flight requests may race on credentials — confirm whether
        // a per-request client instance is needed under load.
        const selectedFalKey = getNextFalKey();
        fal.config({
            credentials: selectedFalKey,
        });

        const { prompt, system_prompt } = convertMessagesToFalPrompt(messages);

        const falInput = {
            model: model,
            prompt: prompt,
            // Only attach system_prompt when non-empty.
            ...(system_prompt && { system_prompt: system_prompt }),
            reasoning: !!reasoning,
        };
        // Verbose debug logging of the full forwarded payload (includes user content).
        console.log("Fal Input:", JSON.stringify(falInput, null, 2));
        console.log("Forwarding request to fal-ai with system-priority + separator + recency input:");
        console.log("System Prompt Length:", system_prompt?.length || 0);
        console.log("Prompt Length:", prompt?.length || 0);
        console.log("--- System Prompt Start ---");
        console.log(system_prompt);
        console.log("--- System Prompt End ---");
        console.log("--- Prompt Start ---");
        console.log(prompt);
        console.log("--- Prompt End ---");

        // --- Stream / non-stream handling ---
        if (stream) {
            // Standard SSE response headers, flushed immediately.
            res.setHeader('Content-Type', 'text/event-stream; charset=utf-8');
            res.setHeader('Cache-Control', 'no-cache');
            res.setHeader('Connection', 'keep-alive');
            res.setHeader('Access-Control-Allow-Origin', '*');
            res.flushHeaders();

            // Fal streams the *cumulative* output each event; previousOutput is
            // tracked so only the new suffix is emitted as an OpenAI delta.
            let previousOutput = '';

            const falStream = await fal.stream("fal-ai/any-llm", { input: falInput });

            try {
                for await (const event of falStream) {
                    // Defensive reads: assumes events carry output/partial/error
                    // fields; anything missing falls back to safe defaults.
                    const currentOutput = (event && typeof event.output === 'string') ? event.output : '';
                    const isPartial = (event && typeof event.partial === 'boolean') ? event.partial : true;
                    const errorInfo = (event && event.error) ? event.error : null;

                    if (errorInfo) {
                        console.error("Error received in fal stream event:", errorInfo);
                        const errorChunk = { id: `chatcmpl-${Date.now()}-error`, object: "chat.completion.chunk", created: Math.floor(Date.now() / 1000), model: model, choices: [{ index: 0, delta: {}, finish_reason: "error", message: { role: 'assistant', content: `Fal Stream Error: ${JSON.stringify(errorInfo)}` } }] };
                        res.write(`data: ${JSON.stringify(errorChunk)}\n\n`);
                        break; // Stop processing on error
                    }

                    // Compute the delta: normally the suffix beyond previousOutput;
                    // on a mismatch (fal restarted/rewrote output) resend everything.
                    let deltaContent = '';
                    if (currentOutput.startsWith(previousOutput)) {
                        deltaContent = currentOutput.substring(previousOutput.length);
                    } else if (currentOutput.length > 0) {
                        console.warn("Fal stream output mismatch detected. Sending full current output as delta.", { previousLength: previousOutput.length, currentLength: currentOutput.length });
                        deltaContent = currentOutput;
                        previousOutput = ''; // Reset previous if mismatch
                    }
                    previousOutput = currentOutput;

                    // Send chunk if there's content or if it's the final chunk (isPartial is false)
                    if (deltaContent || !isPartial) {
                        const openAIChunk = { id: `chatcmpl-${Date.now()}`, object: "chat.completion.chunk", created: Math.floor(Date.now() / 1000), model: model, choices: [{ index: 0, delta: { content: deltaContent }, finish_reason: isPartial === false ? "stop" : null }] };
                        res.write(`data: ${JSON.stringify(openAIChunk)}\n\n`);
                    }
                }
                // After the loop, ensure the [DONE] signal is sent if the stream finished normally
                if (!res.writableEnded) {
                    res.write(`data: [DONE]\n\n`);
                    res.end();
                    console.log("Stream finished and [DONE] sent.");
                }

            } catch (streamError) {
                // Errors thrown while iterating the fal stream: best-effort error
                // event + [DONE] so SSE clients terminate cleanly.
                console.error('Error during fal stream processing loop:', streamError);
                try {
                    if (!res.writableEnded) { // Check if we can still write to the response
                        const errorDetails = (streamError instanceof Error) ? streamError.message : JSON.stringify(streamError);
                        res.write(`data: ${JSON.stringify({ error: { message: "Stream processing error", type: "proxy_error", details: errorDetails } })}\n\n`);
                        res.write(`data: [DONE]\n\n`); // Send DONE even after error
                        res.end();
                    } else {
                        console.error("Stream already ended, cannot send error message.");
                    }
                } catch (finalError) {
                    console.error('Error sending stream error message to client:', finalError);
                    if (!res.writableEnded) { res.end(); }
                }
            }
        } else {
            // --- Non-stream handling ---
            console.log("Executing non-stream request...");
            const result = await fal.subscribe("fal-ai/any-llm", { input: falInput, logs: true });
            console.log("Received non-stream result from fal-ai:", JSON.stringify(result, null, 2));

            if (result && result.error) {
                console.error("Fal-ai returned an error in non-stream mode:", result.error);
                return res.status(500).json({ object: "error", message: `Fal-ai error: ${JSON.stringify(result.error)}`, type: "fal_ai_error", param: null, code: null });
            }

            // Map the fal result onto an OpenAI chat.completion shape.
            // NOTE(review): assumes the subscribe result exposes requestId/output/
            // reasoning at the top level — verify against the installed
            // @fal-ai/client version, which may nest payloads under `data`.
            const openAIResponse = {
                id: `chatcmpl-${result.requestId || Date.now()}`, object: "chat.completion", created: Math.floor(Date.now() / 1000), model: model,
                choices: [{ index: 0, message: { role: "assistant", content: result.output || "" }, finish_reason: "stop" }],
                usage: { prompt_tokens: null, completion_tokens: null, total_tokens: null }, system_fingerprint: null,
                ...(result.reasoning && { fal_reasoning: result.reasoning }),
            };
            res.json(openAIResponse);
            console.log("Returned non-stream response.");
        }

    } catch (error) {
        // Catch-all: input conversion errors, fal client errors thrown before/outside
        // the stream loop, etc.
        console.error('Unhandled error in /v1/chat/completions:', error);
        if (!res.headersSent) {
            const errorMessage = (error instanceof Error) ? error.message : JSON.stringify(error);
            res.status(500).json({ error: 'Internal Server Error in Proxy', details: errorMessage });
        } else if (!res.writableEnded) {
            console.error("Headers already sent, ending response.");
            res.end();
        }
    }
});
391
 
392
// Start the HTTP server and print a startup banner.
app.listen(PORT, () => {
    const bannerLines = [
        `===================================================`,
        ` Fal OpenAI Proxy Server (Key Rotation + System Top + Separator + Recency)`,
        ` Listening on port: ${PORT}`,
        ` Loaded ${falKeys.length} Fal AI Keys for rotation.`,
        ` Using Limits: System Prompt=${SYSTEM_PROMPT_LIMIT}, Prompt=${PROMPT_LIMIT}`,
        ` API Key Auth Enabled: ${API_KEY ? 'Yes' : 'No'}`,
        ` Chat Completions Endpoint: POST http://localhost:${PORT}/v1/chat/completions`,
        ` Models Endpoint: GET http://localhost:${PORT}/v1/models`,
        `===================================================`,
    ];
    for (const line of bannerLines) {
        console.log(line);
    }
});
404
 
405
// Root path: simple liveness/info endpoint.
app.get('/', (req, res) => {
    const statusMessage = 'Fal OpenAI Proxy (Key Rotation + System Top + Separator + Recency Strategy) is running.';
    res.send(statusMessage);
});