// fal-openai-proxy / server.js
// (Hugging Face page chrome removed from the scraped copy; commit 9df5dcc "Update server.js")
// Node built-in: constant-time secret comparison for proxy auth
import crypto from 'node:crypto';
import express from 'express';
// Import the 'fal' object directly for configuration within the retry loop
import { fal } from '@fal-ai/client';
// --- Key Management Setup ---
// FAL_KEY holds a comma-separated list of Fal AI keys; API_KEY is the secret
// clients must present to this proxy. Both are required at startup.
const FAL_KEY_STRING = process.env.FAL_KEY;
const API_KEY = process.env.API_KEY;

if (!FAL_KEY_STRING) {
  console.error("ERROR: FAL_KEY environment variable is not set.");
  console.error("Ensure FAL_KEY contains a comma-separated list of your Fal AI keys.");
  process.exit(1); // Cannot serve anything without upstream keys
}

// Split on commas, trim whitespace, and drop empty entries from stray commas.
const falKeys = [];
for (const rawKey of FAL_KEY_STRING.split(',')) {
  const trimmedKey = rawKey.trim();
  if (trimmedKey.length > 0) {
    falKeys.push(trimmedKey);
  }
}

if (falKeys.length === 0) {
  console.error("ERROR: No valid Fal keys found in the FAL_KEY environment variable after parsing.");
  console.error("Ensure FAL_KEY is a comma-separated list, e.g., 'key1,key2,key3'.");
  process.exit(1); // Parsing produced nothing usable
}

if (!API_KEY) {
  console.error("ERROR: API_KEY environment variable is not set.");
  process.exit(1); // Refuse to run without proxy authentication
}

// Round-robin cursor into falKeys, plus the set of keys that failed
// persistently (auth/quota errors) during this process's lifetime.
let currentKeyIndex = 0;
const invalidKeys = new Set();

console.log(`Loaded ${falKeys.length} Fal AI Key(s) from the FAL_KEY environment variable.`);
/**
 * Returns the next usable Fal AI key, rotating round-robin over falKeys and
 * skipping any key already present in the invalidKeys set. Advances the
 * module-level cursor on every inspected key.
 * @returns {object|null} { key, index } for the chosen key, or null when every key is invalid.
 */
function getNextValidKey() {
  // Fast path: everything already flagged bad.
  if (invalidKeys.size >= falKeys.length) {
    console.error("All Fal AI keys are marked as invalid.");
    return null;
  }
  const startIndex = currentKeyIndex;
  for (let tried = 0; tried < falKeys.length; tried++) {
    const idx = currentKeyIndex % falKeys.length;
    const candidate = falKeys[idx];
    // Advance the cursor for the *next* call no matter what happens below.
    currentKeyIndex = (idx + 1) % falKeys.length;
    if (!invalidKeys.has(candidate)) {
      console.log(`Using Fal Key index: ${idx} (from FAL_KEY list)`);
      return { key: candidate, index: idx };
    }
    console.log(`Skipping invalid Fal Key index: ${idx}`);
    // Safeguard against a full wrap-around; largely redundant with the size
    // check above but kept as defense in depth.
    if (currentKeyIndex === startIndex) {
      console.warn("Looped through all keys, potentially all are invalid.");
      break;
    }
  }
  console.error("Could not find a valid Fal AI key after checking all potentially available keys.");
  return null;
}
/**
 * Heuristically decides whether an error looks like a Fal AI key problem
 * (bad credentials, insufficient permissions, or exhausted quota) rather
 * than a transient/network/input failure. May need tuning as real Fal
 * error shapes are observed.
 * @param {Error|object} error - The error object caught.
 * @returns {boolean} True when the error appears key-related.
 */
function isKeyRelatedError(error) {
  if (!error) return false; // null/undefined: nothing to inspect

  // HTTP status heuristics: 401 Unauthorized, 403 Forbidden, 429 quota/rate limit.
  const statusCode = error.status || error.statusCode;
  if ([401, 403, 429].includes(statusCode)) {
    console.warn(`Detected potential key-related error (HTTP Status: ${statusCode}).`);
    return true;
  }

  // Message heuristics (case-insensitive substring match).
  const lowered = error.message?.toLowerCase() || '';
  const suspiciousPhrases = [
    'invalid api key', 'authentication failed', 'permission denied',
    'quota exceeded', 'forbidden', 'unauthorized', 'rate limit',
    'credentials', 'api key missing', 'invalid credential'
  ];
  for (const phrase of suspiciousPhrases) {
    if (lowered.includes(phrase)) {
      console.warn(`Detected potential key-related error (message contains relevant pattern: "${lowered}")`);
      return true;
    }
  }

  // Extend with Fal-specific error codes here if they become known,
  // e.g. error.code === 'FAL_AUTH_FAILURE'.
  return false;
}
// --- End Key Management Setup ---
const app = express();
// Body parsers with raised payload limits so large chat histories are accepted.
app.use(express.json({ limit: '50mb' }));
app.use(express.urlencoded({ extended: true, limit: '50mb' }));
// Listening port: PORT env var, defaulting to 3000.
const PORT = process.env.PORT || 3000;
// API Key Authentication Middleware.
// Validates an "Authorization: Bearer <key>" header against the proxy's
// API_KEY. The comparison hashes both values and uses crypto.timingSafeEqual
// so that neither the key length nor a matching prefix leaks through
// response timing (a plain `!==` string compare is not constant-time).
const apiKeyAuth = (req, res, next) => {
  const authHeader = req.headers['authorization'];
  if (!authHeader) {
    console.warn('Unauthorized: No Authorization header provided');
    return res.status(401).json({ error: 'Unauthorized: No API Key provided' });
  }
  // Expecting "Bearer YOUR_API_KEY"
  const authParts = authHeader.split(' ');
  if (authParts.length !== 2 || authParts[0].toLowerCase() !== 'bearer') {
    console.warn('Unauthorized: Invalid Authorization header format. Expected "Bearer <key>".');
    return res.status(401).json({ error: 'Unauthorized: Invalid Authorization header format' });
  }
  const providedKey = authParts[1];
  // Hash both sides to equal-length buffers (timingSafeEqual requires equal
  // lengths), then compare in constant time.
  const providedDigest = crypto.createHash('sha256').update(providedKey).digest();
  const expectedDigest = crypto.createHash('sha256').update(API_KEY).digest();
  if (!crypto.timingSafeEqual(providedDigest, expectedDigest)) {
    console.warn('Unauthorized: Invalid API Key provided.');
    return res.status(401).json({ error: 'Unauthorized: Invalid API Key' });
  }
  // Key is valid, proceed to the next middleware or route handler
  next();
};
// Apply API Key Authentication to relevant endpoints
app.use(['/v1/models', '/v1/chat/completions'], apiKeyAuth);
// === Global Limits Definition ===
// Character budgets for the two text fields sent to fal-ai/any-llm.
const PROMPT_LIMIT = 4800; // Max length for the main 'prompt' field
const SYSTEM_PROMPT_LIMIT = 4800; // Max length for the 'system_prompt' field
// === End Limits Definition ===
// Models advertised via GET /v1/models. Requests for models outside this list
// are still attempted (with a warning) by the chat completions handler.
const FAL_SUPPORTED_MODELS = [
  "anthropic/claude-3.7-sonnet",
  "anthropic/claude-3.5-sonnet",
  "anthropic/claude-3-5-haiku",
  "anthropic/claude-3-haiku",
  "google/gemini-pro-1.5",
  "google/gemini-flash-1.5",
  "google/gemini-flash-1.5-8b",
  "google/gemini-2.0-flash-001",
  "meta-llama/llama-3.2-1b-instruct",
  "meta-llama/llama-3.2-3b-instruct",
  "meta-llama/llama-3.1-8b-instruct",
  "meta-llama/llama-3.1-70b-instruct",
  "openai/gpt-4o-mini",
  "openai/gpt-4o",
  "deepseek/deepseek-r1",
  "meta-llama/llama-4-maverick",
  "meta-llama/llama-4-scout"
  // Add or remove models here
];
// Extracts the owner/organization segment from an "org/model" id string.
// Anything that is not a string containing '/' maps to the default owner.
const getOwner = (modelId) => {
  if (typeof modelId !== 'string' || !modelId.includes('/')) {
    return 'fal-ai'; // default owner for unexpected or missing formats
  }
  return modelId.split('/')[0];
}
// GET /v1/models endpoint - Returns the list of supported models in
// OpenAI's list format ({ object: "list", data: [...] }).
app.get('/v1/models', (req, res) => {
  console.log("Received request for GET /v1/models");
  try {
    const data = [];
    for (const id of FAL_SUPPORTED_MODELS) {
      data.push({
        id,
        object: "model",
        created: Math.floor(Date.now() / 1000), // current timestamp; Fal has no creation date
        owned_by: getOwner(id)
      });
    }
    res.json({ object: "list", data });
    console.log("Successfully returned model list.");
  } catch (error) {
    console.error("Error processing GET /v1/models:", error);
    res.status(500).json({ error: "Failed to retrieve model list." });
  }
});
/**
 * Converts an OpenAI-style messages array into Fal AI's prompt / system_prompt pair.
 *
 * Strategy:
 *   1. All 'system' messages are concatenated (truncated to the system limit)
 *      and always take top priority in system_prompt.
 *   2. User/assistant history is filled most-recent-first into 'prompt'; once
 *      the prompt limit is reached, older messages overflow into the remaining
 *      space of 'system_prompt', separated from the fixed system text by SEPARATOR.
 *   3. Messages that fit in neither slot are dropped (oldest first).
 *
 * Fix vs. previous revision: the space reserved for the separator is now its
 * actual length (the old "+4" heuristic under-counted it, letting the combined
 * system_prompt exceed the limit by ~44 characters).
 *
 * @param {Array<object>} messages - Array of message objects ({ role: string, content: string }).
 * @param {number} [promptLimit=PROMPT_LIMIT] - Max characters for the 'prompt' field.
 * @param {number} [systemPromptLimit=SYSTEM_PROMPT_LIMIT] - Max characters for 'system_prompt'.
 * @returns {object} An object containing { system_prompt: string, prompt: string }.
 * @throws {Error} If input is invalid or an internal processing error occurs.
 */
function convertMessagesToFalPrompt(messages, promptLimit = PROMPT_LIMIT, systemPromptLimit = SYSTEM_PROMPT_LIMIT) {
  // --- Input Validation ---
  if (!Array.isArray(messages)) {
    console.error("Error in convertMessagesToFalPrompt: Input 'messages' is not an array.");
    throw new Error("Invalid input: 'messages' must be an array.");
  }
  if (messages.length === 0) {
    console.warn("Warning in convertMessagesToFalPrompt: Input 'messages' array is empty.");
    return { system_prompt: "", prompt: "" }; // Nothing to convert
  }
  // --- End Input Validation ---

  // Separator between the fixed system prompt and overflow history. Declared
  // up front because its length must be budgeted for in step 3.
  const SEPARATOR = "\n\n------- Earlier Conversation History -------\n\n";

  try {
    let fixed_system_prompt_content = "";
    const conversation_message_blocks = [];

    // 1. Separate system messages; format user/assistant messages as blocks.
    for (const message of messages) {
      // Skip malformed entries rather than failing the whole request.
      if (!message || typeof message !== 'object' || typeof message.role !== 'string') {
        console.warn(`Skipping invalid message object in convertMessagesToFalPrompt: ${JSON.stringify(message)}`);
        continue;
      }
      // null/undefined content becomes an empty string.
      let content = (message.content === null || message.content === undefined) ? "" : String(message.content);
      switch (message.role) {
        case 'system':
          fixed_system_prompt_content += `System: ${content}\n\n`;
          break;
        case 'user':
          conversation_message_blocks.push(`Human: ${content}\n\n`);
          break;
        case 'assistant':
          conversation_message_blocks.push(`Assistant: ${content}\n\n`);
          break;
        default:
          console.warn(`Unsupported role encountered in convertMessagesToFalPrompt: ${message.role}. Skipping message.`);
          continue;
      }
    }

    // 2. Truncate combined system messages if they exceed the limit.
    if (fixed_system_prompt_content.length > systemPromptLimit) {
      const originalLength = fixed_system_prompt_content.length;
      fixed_system_prompt_content = fixed_system_prompt_content.substring(0, systemPromptLimit);
      console.warn(`Combined system messages truncated from ${originalLength} to ${systemPromptLimit} characters.`);
    }
    fixed_system_prompt_content = fixed_system_prompt_content.trim();

    // 3. Compute remaining system_prompt space for overflow history, reserving
    // room for the separator that will be inserted if any history lands there.
    let space_occupied_by_fixed_system = 0;
    if (fixed_system_prompt_content.length > 0) {
      space_occupied_by_fixed_system = fixed_system_prompt_content.length + SEPARATOR.length;
    }
    const remaining_system_limit = Math.max(0, systemPromptLimit - space_occupied_by_fixed_system);

    // 4. Fill history newest-first: 'prompt' first, then system_prompt overflow.
    const prompt_history_blocks = [];        // blocks for the main 'prompt' field
    const system_prompt_history_blocks = []; // blocks overflowing into 'system_prompt'
    let current_prompt_length = 0;
    let current_system_history_length = 0;
    let promptFull = (promptLimit <= 0);     // handle zero/negative limit
    let systemHistoryFull = (remaining_system_limit <= 0);

    for (let i = conversation_message_blocks.length - 1; i >= 0; i--) {
      const message_block = conversation_message_blocks[i];
      const block_length = (typeof message_block === 'string') ? message_block.length : 0;
      if (block_length === 0) continue; // nothing to place
      // Both slots exhausted: all remaining (older) messages are dropped.
      if (promptFull && systemHistoryFull) break;

      // Prefer the main 'prompt' slot.
      if (!promptFull) {
        if (current_prompt_length + block_length <= promptLimit) {
          prompt_history_blocks.unshift(message_block); // keep chronological order
          current_prompt_length += block_length;
          continue;
        } else {
          promptFull = true; // first non-fitting block closes the prompt slot
        }
      }
      // Fall back to the system_prompt overflow slot.
      if (!systemHistoryFull) {
        if (current_system_history_length + block_length <= remaining_system_limit) {
          system_prompt_history_blocks.unshift(message_block);
          current_system_history_length += block_length;
          continue;
        } else {
          systemHistoryFull = true;
        }
      }
    }

    // 5. Assemble the final fields.
    const system_prompt_history_content = system_prompt_history_blocks.join('').trim();
    const final_prompt = prompt_history_blocks.join('').trim();
    let final_system_prompt = "";
    const hasFixedSystem = fixed_system_prompt_content.length > 0;
    const hasSystemHistory = system_prompt_history_content.length > 0;
    if (hasFixedSystem && hasSystemHistory) {
      // Both parts exist: join with the separator (already budgeted in step 3).
      final_system_prompt = fixed_system_prompt_content + SEPARATOR + system_prompt_history_content;
    } else if (hasFixedSystem) {
      final_system_prompt = fixed_system_prompt_content;
    } else if (hasSystemHistory) {
      final_system_prompt = system_prompt_history_content;
    }
    // If both are empty, final_system_prompt stays "".

    // 6. Return the structured result.
    return {
      system_prompt: final_system_prompt,
      prompt: final_prompt
    };
  } catch (internalError) {
    console.error("!!! CRITICAL ERROR inside convertMessagesToFalPrompt:", internalError);
    console.error("!!! Failing messages input was:", JSON.stringify(messages, null, 2)); // Log the problematic input
    // Re-throw so the endpoint handler reports a setup failure
    throw new Error(`Failed to process messages internally: ${internalError.message}`);
  }
}
// === End convertMessagesToFalPrompt function ===
/**
 * Makes a request to the Fal AI API, handling key rotation and retries on key-related errors.
 * Each unique key is tried at most once per request; a key that fails with an
 * auth/quota-style error (see isKeyRelatedError) is added to the module-level
 * invalidKeys set so future requests skip it. Non-key-related errors abort
 * immediately without trying further keys.
 * @param {object} falInput - The input object for the Fal AI API call.
 * @param {boolean} [stream=false] - Whether to make a streaming request.
 * @returns {Promise<object|AsyncIterable<object>>} The result object or async iterator for streams.
 * @throws {Error} If the request fails after trying all valid keys, or if a non-key-related error occurs.
 */
async function makeFalRequestWithRetry(falInput, stream = false) {
  let attempts = 0;
  // Maximum attempts equals the initial number of keys
  const maxAttempts = falKeys.length;
  // Track keys attempted *within this specific request* to avoid infinite loops on retry logic issues
  const attemptedKeysInThisRequest = new Set();
  while (attempts < maxAttempts) {
    const keyInfo = getNextValidKey(); // Get the next *valid* key info { key, index }
    if (!keyInfo) {
      // Only happens when every key is currently in the invalidKeys set
      console.error("makeFalRequestWithRetry: No valid Fal AI keys remaining.");
      throw new Error("No valid Fal AI keys available (all marked as invalid).");
    }
    // Prevent retrying the exact same key within this single request flow
    if (attemptedKeysInThisRequest.has(keyInfo.key)) {
      console.warn(`Key at index ${keyInfo.index} was already attempted for this request. Skipping to find next different key.`);
      // Do not increment 'attempts' here as we didn't actually use the key. Let the loop find the next.
      continue;
    }
    attemptedKeysInThisRequest.add(keyInfo.key);
    attempts++; // Count this as a distinct attempt with a unique key for this request
    try {
      console.log(`Attempt ${attempts}/${maxAttempts}: Trying Fal Key index ${keyInfo.index}...`);
      // *** CRITICAL: Reconfigure the global fal client with the selected key for THIS attempt ***
      // WARNING: This reconfigures the GLOBAL client instance. In high-concurrency scenarios,
      // this might lead to race conditions where one request configures the key just before another uses it.
      // Consider instance pooling or check if fal-ai client offers per-request credentials if this becomes an issue.
      console.warn(`Configuring GLOBAL fal client with key index ${keyInfo.index}. Review concurrency implications.`);
      fal.config({ credentials: keyInfo.key }); // Use the specific key for this attempt
      if (stream) {
        // Use the now-configured global 'fal' object for the stream request
        const falStream = await fal.stream("fal-ai/any-llm", { input: falInput });
        console.log(`Successfully initiated stream with key index ${keyInfo.index}.`);
        // Success! Return the stream iterator directly for the caller to handle
        return falStream;
      } else {
        // Use the now-configured global 'fal' object for the non-stream request
        console.log(`Executing non-stream request with key index ${keyInfo.index}...`);
        // fal.subscribe is the non-stream call; logs:true aids Fal-side debugging
        const result = await fal.subscribe("fal-ai/any-llm", {
          input: falInput,
          logs: true
        });
        console.log(`Successfully received non-stream result with key index ${keyInfo.index}.`);
        // A successful-looking response may still carry an error payload; inspect it.
        if (result && result.error) {
          console.error(`Fal AI returned an error object within the non-stream result payload (Key Index ${keyInfo.index}):`, result.error);
          // Decide if this specific payload error should also invalidate the key
          if (isKeyRelatedError(result.error)) { // Reuse the checker
            console.warn(`Marking Fal Key index ${keyInfo.index} as invalid due to error in response payload.`);
            invalidKeys.add(keyInfo.key);
            continue; // Go to the next iteration of the while loop (try next key)
          } else {
            // Throw an error that will be caught by the outer handler, not retried with other keys
            throw new Error(`Fal AI error reported in result payload: ${JSON.stringify(result.error)}`);
          }
        }
        // Success! Return the result
        return result;
      }
    } catch (error) {
      console.error(`Error caught using Fal Key index ${keyInfo.index}:`, error.message || error);
      // Check if the caught error indicates the key is invalid
      if (isKeyRelatedError(error)) {
        console.warn(`Marking Fal Key index ${keyInfo.index} as invalid due to caught error.`);
        // Mark the failed key so future requests (and this loop) skip it
        invalidKeys.add(keyInfo.key);
        // Continue to the next iteration of the while loop to try another key
      } else {
        // Non-key-related errors (network, bad input format, Fal server error)
        // are not retried with other keys: fail the request immediately.
        console.error("Error does not appear to be key-related. Failing request without further key retries.");
        throw error; // Re-throw the original error to be caught by the main endpoint handler
      }
    }
  } // End while loop
  // All available keys were tried and failed with key-related errors
  throw new Error(`Request failed after trying ${attempts} unique Fal key(s). All failed with key-related errors or were already marked invalid.`);
}
// POST /v1/chat/completions endpoint - Handles chat requests, uses key rotation/failover.
// Supports both streaming (SSE, OpenAI chunk format) and non-stream responses.
app.post('/v1/chat/completions', async (req, res) => {
  // Extract parameters from request body. Other OpenAI params (temperature,
  // max_tokens, ...) land in restOpenAIParams but are NOT forwarded to Fal.
  const { model, messages, stream = false, reasoning = false, ...restOpenAIParams } = req.body;
  console.log(`--> POST /v1/chat/completions | Model: ${model} | Stream: ${stream}`);
  // Basic Input Validation
  if (!FAL_SUPPORTED_MODELS.includes(model)) {
    // Log warning but allow attempt if model isn't in the known list
    console.warn(`Warning: Requested model '${model}' is not in the explicitly supported list. Proxy will still attempt the request.`);
  }
  if (!model || !messages || !Array.isArray(messages) || messages.length === 0) {
    console.error("Invalid request: Missing 'model' or 'messages' array is empty/invalid.");
    return res.status(400).json({ error: 'Bad Request: `model` and a non-empty `messages` array are required.' });
  }
  try {
    // --- Prepare Fal AI Input using the conversion function ---
    // May throw on invalid messages; caught by the outer catch block below.
    const { prompt, system_prompt } = convertMessagesToFalPrompt(messages);
    const falInput = {
      model: model, // Pass the requested model
      prompt: prompt, // The main prompt constructed from recent history
      // Only include system_prompt if it has content
      ...(system_prompt && system_prompt.length > 0 && { system_prompt: system_prompt }),
      reasoning: !!reasoning, // Ensure boolean, pass reasoning flag if provided
    };
    console.log("Attempting Fal request with key rotation/retry logic...");
    console.log(`Prepared Input Lengths - System Prompt: ${system_prompt?.length || 0}, Prompt: ${prompt?.length || 0}`);
    // --- Handle Stream vs Non-Stream using the retry helper function ---
    if (stream) {
      // Set headers for Server-Sent Events (SSE)
      res.setHeader('Content-Type', 'text/event-stream; charset=utf-8');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');
      res.setHeader('Access-Control-Allow-Origin', '*'); // Adjust CORS for production if needed
      res.flushHeaders(); // Send headers immediately
      let previousOutput = ''; // Track previous output for delta calculation
      let falStream; // Variable to hold the stream iterator
      try {
        // Initiate the stream using the retry helper
        falStream = await makeFalRequestWithRetry(falInput, true);
        // Process the stream events asynchronously. Fal sends cumulative
        // output; we diff against previousOutput to emit OpenAI-style deltas.
        for await (const event of falStream) {
          // Safely extract data from the event
          const currentOutput = (event && typeof event.output === 'string') ? event.output : '';
          // Default to partial=true if missing
          const isPartial = (event && typeof event.partial === 'boolean') ? event.partial : true;
          const errorInfo = (event && event.error) ? event.error : null; // Check for errors within the stream event
          // Handle errors reported *within* a stream event payload
          if (errorInfo) {
            console.error("Error received *within* fal stream event payload:", errorInfo);
            // Send an error chunk to the client; processing then continues.
            const errorChunk = {
              id: `chatcmpl-${Date.now()}-error`, object: "chat.completion.chunk", created: Math.floor(Date.now() / 1000), model: model,
              choices: [{ index: 0, delta: {}, finish_reason: "error", message: { role: 'assistant', content: `Fal Stream Event Error: ${JSON.stringify(errorInfo)}` } }]
            };
            // Check if stream is still writable before sending
            if (!res.writableEnded) {
              res.write(`data: ${JSON.stringify(errorChunk)}\n\n`);
            } else {
              console.warn("Stream already ended when trying to write stream event error.");
            }
            // Uncomment 'break' below to stop on the first in-stream error instead.
            // break;
          }
          // Calculate the delta (new content) since the last event
          let deltaContent = '';
          if (currentOutput.startsWith(previousOutput)) {
            // Normal case: current output contains previous plus new content
            deltaContent = currentOutput.substring(previousOutput.length);
          } else if (currentOutput.length > 0) {
            // Output mismatch or reset: send the entire current output as delta
            console.warn("Fal stream output mismatch or reset detected. Sending full current output as delta.");
            deltaContent = currentOutput;
            previousOutput = ''; // Reset comparison base on mismatch
          } // If currentOutput is empty, deltaContent remains empty
          previousOutput = currentOutput; // Update for the next iteration
          // Send OpenAI-compatible SSE chunk if there's new content or if it's the final chunk
          if (deltaContent || !isPartial) {
            const openAIChunk = {
              id: `chatcmpl-${Date.now()}`, // Consider more robust ID generation if needed
              object: "chat.completion.chunk",
              created: Math.floor(Date.now() / 1000),
              model: model, // Echo back the requested model
              choices: [{
                index: 0,
                delta: { content: deltaContent }, // The new part of the content
                // Set finish_reason only on the final chunk
                finish_reason: isPartial === false ? "stop" : null
              }]
              // system_fingerprint is not provided by Fal, so omit or set to null
            };
            // Check if stream is still writable before sending
            if (!res.writableEnded) {
              res.write(`data: ${JSON.stringify(openAIChunk)}\n\n`);
            } else {
              console.warn("Stream already ended when trying to write data chunk.");
            }
          }
        } // End for-await loop over falStream
        // Send the final [DONE] marker to indicate stream completion
        if (!res.writableEnded) {
          res.write(`data: [DONE]\n\n`);
          res.end(); // Close the connection
          console.log("<-- Stream finished successfully and [DONE] sent.");
        } else {
          console.log("<-- Stream processing finished, but connection was already ended before [DONE].");
        }
      } catch (streamError) {
        // Catches errors from makeFalRequestWithRetry OR the stream iteration itself (e.g., network drop)
        console.error('Error during stream request processing:', streamError.message || streamError);
        try {
          // If headers haven't been sent, the error likely happened during initial connection (makeFalRequestWithRetry)
          if (!res.headersSent) {
            const errorMessage = (streamError instanceof Error) ? streamError.message : JSON.stringify(streamError);
            // Use 502 Bad Gateway for upstream failures (like all keys failing)
            res.status(502).json({
              error: 'Failed to initiate Fal stream',
              details: errorMessage // Include the error message from the helper
            });
            console.log("<-- Stream initiation failed response sent (502).");
          } else if (!res.writableEnded) {
            // Stream started but failed mid-way. Try to send an error message within the stream context.
            const errorDetails = (streamError instanceof Error) ? streamError.message : JSON.stringify(streamError);
            // Send an error object in the SSE stream format
            res.write(`data: ${JSON.stringify({ error: { message: "Stream processing error after initiation", type: "proxy_error", details: errorDetails } })}\n\n`);
            res.write(`data: [DONE]\n\n`); // Still send DONE after error for robust client handling
            res.end();
            console.log("<-- Stream error sent within stream, stream ended.");
          } else {
            // Stream already ended, just log the error server-side.
            console.log("<-- Stream error occurred, but connection was already ended.");
          }
        } catch (finalError) {
          // Error trying to send the error message itself (rare)
          console.error('Error sending stream error message to client:', finalError);
          // Ensure response is ended if possible
          if (!res.writableEnded) { res.end(); }
        }
      }
    } else {
      // --- Non-Stream Request ---
      try {
        // Get the result using the retry helper
        const result = await makeFalRequestWithRetry(falInput, false);
        // --- Construct OpenAI compatible response ---
        const openAIResponse = {
          id: `chatcmpl-${result.requestId || Date.now()}`, // Use Fal's requestId if available
          object: "chat.completion",
          created: Math.floor(Date.now() / 1000),
          model: model, // Echo back the requested model
          choices: [{
            index: 0,
            message: {
              role: "assistant",
              content: result.output || "" // Ensure content is a string, default to empty if missing
            },
            finish_reason: "stop" // Assume 'stop' for successful non-stream completion
          }],
          usage: { // Fal doesn't provide token usage, return nulls
            prompt_tokens: null,
            completion_tokens: null,
            total_tokens: null
          },
          system_fingerprint: null, // Fal doesn't provide this
          // Include Fal specific reasoning if present and requested
          ...(result.reasoning && { fal_reasoning: result.reasoning }),
        };
        res.json(openAIResponse);
        console.log("<-- Non-stream response sent successfully.");
      } catch (error) {
        // Catches errors from makeFalRequestWithRetry (e.g., all keys failed or a non-key-related Fal error)
        console.error('Error during non-stream request processing:', error.message || error);
        // Check if response can still be sent
        if (!res.headersSent) {
          const errorMessage = (error instanceof Error) ? error.message : JSON.stringify(error);
          // Customize error message if it's the specific "all keys failed" error
          const finalMessage = errorMessage.includes("No valid Fal AI keys available") || errorMessage.includes("Request failed after trying")
            ? `Fal request failed: ${errorMessage}` // More direct message
            : `Internal Server Error processing Fal request: ${errorMessage}`;
          // Use 502 Bad Gateway to indicate upstream failure
          res.status(502).json({ error: 'Fal Request Failed', details: finalMessage });
          console.log("<-- Non-stream error response sent (502).");
        } else {
          // Should be rare for non-stream, but log if headers were already sent
          console.error("Headers already sent for non-stream error response? This is unexpected.");
          if (!res.writableEnded) { res.end(); } // Attempt to end response if possible
        }
      }
    }
  } catch (error) {
    // Catch errors occurring *before* the Fal request attempt
    // (e.g., error during `convertMessagesToFalPrompt`, JSON parsing errors)
    console.error('Unhandled error before initiating Fal request (likely setup or input conversion):', error.message || error);
    if (!res.headersSent) {
      const errorMessage = (error instanceof Error) ? error.message : JSON.stringify(error);
      // Use 500 Internal Server Error for issues within the proxy itself
      res.status(500).json({ error: 'Internal Server Error in Proxy Setup', details: errorMessage });
      console.log("<-- Proxy setup error response sent (500).");
    } else {
      console.error("Headers already sent when catching setup error. Ending response.");
      if (!res.writableEnded) { res.end(); }
    }
  }
});
// Start the Express server and print a startup summary banner.
app.listen(PORT, () => {
  console.log(`=====================================================================`);
  console.log(` Fal OpenAI Proxy Server (Multi-Key Rotation & Failover)`);
  console.log(`---------------------------------------------------------------------`);
  console.log(` Listening on port : ${PORT}`);
  console.log(` Reading Fal Keys from : FAL_KEY environment variable (comma-separated)`);
  console.log(` Loaded Keys Count : ${falKeys.length}`);
  console.log(` Invalid Keys Set : Initialized (size: ${invalidKeys.size})`);
  console.log(` Proxy API Key Auth : ${API_KEY ? 'Enabled (using API_KEY env var)' : 'DISABLED'}`);
  console.log(` Input Limits : System Prompt=${SYSTEM_PROMPT_LIMIT}, Prompt=${PROMPT_LIMIT}`);
  console.log(` Concurrency Warning : Global Fal client reconfigured per request attempt!`);
  console.log(`---------------------------------------------------------------------`);
  console.log(` Endpoints Available:`);
  console.log(` POST http://localhost:${PORT}/v1/chat/completions`);
  console.log(` GET http://localhost:${PORT}/v1/models`);
  console.log(`=====================================================================`);
});
// Root path handler for basic health check / info.
app.get('/', (req, res) => {
  const statusLine = `Fal OpenAI Proxy (Multi-Key Rotation/Failover from FAL_KEY) is running. Loaded ${falKeys.length} key(s). Currently ${invalidKeys.size} key(s) marked as invalid.`;
  res.send(statusLine);
});