// fal-openai-proxy/server.js
import express from 'express';
import { fal } from '@fal-ai/client';
// --- Key Management ---
const FAL_KEY_STRING = process.env.FAL_KEY;
const API_KEY = process.env.API_KEY; // custom API key environment variable (unchanged)
if (!FAL_KEY_STRING) {
console.error("Error: FAL_KEY environment variable is not set.");
process.exit(1);
}
// Parse the FAL_KEY string into an array: trim whitespace and drop empty entries
const falKeys = FAL_KEY_STRING.split(',')
.map(key => key.trim())
.filter(key => key.length > 0);
if (falKeys.length === 0) {
console.error("Error: FAL_KEY environment variable is set, but no valid keys were found after parsing.");
process.exit(1);
}
console.log(`Loaded ${falKeys.length} Fal AI Keys.`);
let currentFalKeyIndex = 0; // Index of the *next* key to try for a *new* request
// --- End Key Management ---
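// Example (hypothetical key values): FAL_KEY accepts a single key or a
// comma-separated list for rotation, e.g.
//   FAL_KEY="fal-key-aaaa,fal-key-bbbb,fal-key-cccc"
// Requests try keys round-robin starting at currentFalKeyIndex; the starting
// index advances past a key only after that key serves a request successfully.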
if (!API_KEY) {
console.error("Error: API_KEY environment variable is not set.");
process.exit(1);
}
const app = express();
app.use(express.json({ limit: '50mb' }));
app.use(express.urlencoded({ extended: true, limit: '50mb' }));
const PORT = process.env.PORT || 3000;
// API key authentication middleware (unchanged)
const apiKeyAuth = (req, res, next) => {
const authHeader = req.headers['authorization'];
if (!authHeader) {
console.warn('Unauthorized: No Authorization header provided');
return res.status(401).json({ error: 'Unauthorized: No API Key provided' });
}
const authParts = authHeader.split(' ');
if (authParts.length !== 2 || authParts[0].toLowerCase() !== 'bearer') {
console.warn('Unauthorized: Invalid Authorization header format');
return res.status(401).json({ error: 'Unauthorized: Invalid Authorization header format' });
}
const providedKey = authParts[1];
if (providedKey !== API_KEY) {
console.warn('Unauthorized: Invalid API Key');
return res.status(401).json({ error: 'Unauthorized: Invalid API Key' });
}
next();
};
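// Illustrative client call (hypothetical key value) showing the
// "Bearer <API_KEY>" format this middleware expects:
//
//   await fetch('http://localhost:3000/v1/models', {
//     headers: { 'Authorization': 'Bearer my-proxy-key' }, // must equal API_KEY
//   });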
// Apply the API key auth middleware to all API routes (unchanged)
app.use(['/v1/models', '/v1/chat/completions'], apiKeyAuth);
// === Global limits (character counts, unchanged) ===
const PROMPT_LIMIT = 4800;
const SYSTEM_PROMPT_LIMIT = 4800;
// === End limits ===
// Models supported by fal-ai/any-llm (unchanged)
const FAL_SUPPORTED_MODELS = [
"anthropic/claude-3.7-sonnet",
"anthropic/claude-3.5-sonnet",
"anthropic/claude-3-5-haiku",
"anthropic/claude-3-haiku",
"google/gemini-pro-1.5",
"google/gemini-flash-1.5",
"google/gemini-flash-1.5-8b",
"google/gemini-2.0-flash-001",
"meta-llama/llama-3.2-1b-instruct",
"meta-llama/llama-3.2-3b-instruct",
"meta-llama/llama-3.1-8b-instruct",
"meta-llama/llama-3.1-70b-instruct",
"openai/gpt-4o-mini",
"openai/gpt-4o",
"deepseek/deepseek-r1",
"meta-llama/llama-4-maverick",
"meta-llama/llama-4-scout"
];
// Helper function to get the owner from a model ID (unchanged)
const getOwner = (modelId) => {
if (modelId && modelId.includes('/')) {
return modelId.split('/')[0];
}
return 'fal-ai';
}
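// e.g. getOwner("openai/gpt-4o") === "openai"; IDs without a "/" fall back to "fal-ai".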
// GET /v1/models endpoint (unchanged)
app.get('/v1/models', (req, res) => {
console.log("Received request for GET /v1/models");
try {
const modelsData = FAL_SUPPORTED_MODELS.map(modelId => ({
id: modelId, object: "model", created: 1700000000, owned_by: getOwner(modelId)
}));
res.json({ object: "list", data: modelsData });
console.log("Successfully returned model list.");
} catch (error) {
console.error("Error processing GET /v1/models:", error);
res.status(500).json({ error: "Failed to retrieve model list." });
}
});
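// Example response shape (abridged):
//   { "object": "list",
//     "data": [ { "id": "openai/gpt-4o", "object": "model",
//                 "created": 1700000000, "owned_by": "openai" }, ... ] }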
// === convertMessagesToFalPrompt (unchanged) ===
function convertMessagesToFalPrompt(messages) {
let fixed_system_prompt_content = "";
const conversation_message_blocks = [];
// console.log(`Original messages count: ${messages.length}`); // Reduced logging verbosity
    // 1. Separate out system messages; format user/assistant messages
for (const message of messages) {
let content = (message.content === null || message.content === undefined) ? "" : String(message.content);
switch (message.role) {
case 'system':
fixed_system_prompt_content += `System: ${content}\n\n`;
break;
case 'user':
conversation_message_blocks.push(`Human: ${content}\n\n`);
break;
case 'assistant':
conversation_message_blocks.push(`Assistant: ${content}\n\n`);
break;
default:
console.warn(`Unsupported role: ${message.role}`);
continue;
}
}
    // 2. Truncate the combined system messages if they exceed the limit
if (fixed_system_prompt_content.length > SYSTEM_PROMPT_LIMIT) {
const originalLength = fixed_system_prompt_content.length;
fixed_system_prompt_content = fixed_system_prompt_content.substring(0, SYSTEM_PROMPT_LIMIT);
console.warn(`Combined system messages truncated from ${originalLength} to ${SYSTEM_PROMPT_LIMIT}`);
}
fixed_system_prompt_content = fixed_system_prompt_content.trim();
    // 3. Compute how much space remains in the system_prompt slot for conversation history
let space_occupied_by_fixed_system = 0;
if (fixed_system_prompt_content.length > 0) {
        space_occupied_by_fixed_system = fixed_system_prompt_content.length + 4; // reserve room for the "\n\n...\n\n" separator
}
const remaining_system_limit = Math.max(0, SYSTEM_PROMPT_LIMIT - space_occupied_by_fixed_system);
// console.log(`Trimmed fixed system prompt length: ${fixed_system_prompt_content.length}. Approx remaining system history limit: ${remaining_system_limit}`);
    // 4. Fill user/assistant history backwards from the most recent message
const prompt_history_blocks = [];
const system_prompt_history_blocks = [];
let current_prompt_length = 0;
let current_system_history_length = 0;
let promptFull = false;
let systemHistoryFull = (remaining_system_limit <= 0);
// console.log(`Processing ${conversation_message_blocks.length} user/assistant messages for recency filling.`);
for (let i = conversation_message_blocks.length - 1; i >= 0; i--) {
const message_block = conversation_message_blocks[i];
const block_length = message_block.length;
if (promptFull && systemHistoryFull) {
// console.log(`Both prompt and system history slots full. Omitting older messages from index ${i}.`);
break;
}
        // Try to place the block in the prompt first
if (!promptFull) {
if (current_prompt_length + block_length <= PROMPT_LIMIT) {
prompt_history_blocks.unshift(message_block);
current_prompt_length += block_length;
continue;
} else {
promptFull = true;
// console.log(`Prompt limit (${PROMPT_LIMIT}) reached. Trying system history slot.`);
}
}
        // If the prompt is full, try the remaining system_prompt space
if (!systemHistoryFull) {
if (current_system_history_length + block_length <= remaining_system_limit) {
system_prompt_history_blocks.unshift(message_block);
current_system_history_length += block_length;
continue;
} else {
systemHistoryFull = true;
// console.log(`System history limit (${remaining_system_limit}) reached.`);
}
}
}
    // 5. *** Assemble the final prompt and system_prompt (including separator logic) ***
const system_prompt_history_content = system_prompt_history_blocks.join('').trim();
const final_prompt = prompt_history_blocks.join('').trim();
    const SEPARATOR = "\n\n-------Earlier conversation content follows-----\n\n";
let final_system_prompt = "";
const hasFixedSystem = fixed_system_prompt_content.length > 0;
const hasSystemHistory = system_prompt_history_content.length > 0;
if (hasFixedSystem && hasSystemHistory) {
final_system_prompt = fixed_system_prompt_content + SEPARATOR + system_prompt_history_content;
// console.log("Combining fixed system prompt and history with separator.");
} else if (hasFixedSystem) {
final_system_prompt = fixed_system_prompt_content;
// console.log("Using only fixed system prompt.");
} else if (hasSystemHistory) {
final_system_prompt = system_prompt_history_content;
// console.log("Using only history in system prompt slot.");
}
const result = {
system_prompt: final_system_prompt,
prompt: final_prompt
};
// console.log(`Final system_prompt length (Sys+Separator+Hist): ${result.system_prompt.length}`);
// console.log(`Final prompt length (Hist): ${result.prompt.length}`);
return result;
}
// === End convertMessagesToFalPrompt ===
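// Worked example (short messages, well under both limits; "\n\n" shown literally):
//   convertMessagesToFalPrompt([
//     { role: 'system', content: 'Be terse.' },
//     { role: 'user', content: 'Hi' },
//     { role: 'assistant', content: 'Hello!' },
//     { role: 'user', content: 'Bye' },
//   ])
//   // => { system_prompt: 'System: Be terse.',
//   //      prompt: 'Human: Hi\n\nAssistant: Hello!\n\nHuman: Bye' }
// Only when the recent history overflows PROMPT_LIMIT does the older portion
// spill into the system_prompt slot after SEPARATOR.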
// POST /v1/chat/completions endpoint (with per-key retry logic; stream-corrected version)
app.post('/v1/chat/completions', async (req, res) => {
const { model, messages, stream = false, reasoning = false, ...restOpenAIParams } = req.body;
const requestId = `req-${Date.now()}`; // Unique ID for this incoming request
console.log(`[${requestId}] Received chat completion request for model: ${model}, stream: ${stream}`);
if (!FAL_SUPPORTED_MODELS.includes(model)) {
console.warn(`[${requestId}] Warning: Requested model '${model}' is not in the explicitly supported list.`);
}
if (!model || !messages || !Array.isArray(messages) || messages.length === 0) {
console.error(`[${requestId}] Invalid request parameters:`, { model, messages: Array.isArray(messages) ? messages.length : typeof messages });
return res.status(400).json({ error: 'Missing or invalid parameters: model and messages array are required.' });
}
let lastError = null; // Store the last error encountered during key rotation
let success = false; // Flag to indicate if any key succeeded
    // Prepare the Fal input (only needs to be built once)
const { prompt, system_prompt } = convertMessagesToFalPrompt(messages);
const falInput = {
model: model,
prompt: prompt,
...(system_prompt && { system_prompt: system_prompt }),
reasoning: !!reasoning,
};
    // Log the Fal input and prompt sizes once
console.log(`[${requestId}] Fal Input (prepared once):`, JSON.stringify(falInput, null, 2));
console.log(`[${requestId}] System Prompt Length:`, system_prompt?.length || 0);
console.log(`[${requestId}] Prompt Length:`, prompt?.length || 0);
    // *** Retry loop: try at most falKeys.length keys ***
for (let attempt = 0; attempt < falKeys.length; attempt++) {
const keyIndexToTry = (currentFalKeyIndex + attempt) % falKeys.length;
const selectedFalKey = falKeys[keyIndexToTry];
console.log(`[${requestId}] Attempt ${attempt + 1}/${falKeys.length}: Trying Fal Key at index ${keyIndexToTry}`);
try {
            // Configure the fal client (reconfigured on every attempt)
fal.config({
credentials: selectedFalKey,
});
            // --- Execute the Fal AI call ---
if (stream) {
                // --- Streaming ---
res.setHeader('Content-Type', 'text/event-stream; charset=utf-8');
res.setHeader('Cache-Control', 'no-cache');
res.setHeader('Connection', 'keep-alive');
res.setHeader('Access-Control-Allow-Origin', '*');
                // !! Do not flushHeaders here; wait until the key is confirmed working !!
let previousOutput = '';
let firstEventProcessed = false;
let streamFailedMidway = false; // Flag for errors after successful start
let keyConfirmedWorking = false; // Flag if key actually produced data
const falStream = await fal.stream("fal-ai/any-llm", { input: falInput });
                // Process the stream
try {
for await (const event of falStream) {
const currentOutput = (event && typeof event.output === 'string') ? event.output : '';
const isPartial = (event && typeof event.partial === 'boolean') ? event.partial : true;
const errorInfo = (event && event.error) ? event.error : null;
const eventStatus = errorInfo?.status; // Check status within error object if present
                        // --- Check the event for errors ---
if (errorInfo) {
console.warn(`[${requestId}] Error received in fal stream event (Key Index ${keyIndexToTry}):`, errorInfo);
lastError = errorInfo; // Store the error
                            // If the very first event is a key-related error (401/403/429), abort this key's attempt
if (!firstEventProcessed && (eventStatus === 401 || eventStatus === 403 || eventStatus === 429)) {
console.warn(`[${requestId}] Key-related error (${eventStatus}) on first stream event for key index ${keyIndexToTry}. Aborting this attempt.`);
                                // No response needed; break out of the inner loop so the outer loop tries the next key
break; // Exit the inner `for await...of` loop
} else {
                                // Any other error, or an error after the first event, counts as a stream failure
console.error(`[${requestId}] Unrecoverable stream error or error after stream start.`);
streamFailedMidway = true; // Mark stream as failed after start
if (!res.headersSent) {
                                    // Headers not sent yet: the key likely failed immediately, so respond with a 500
res.status(500).json({ object: "error", message: `Fal Stream Error: ${JSON.stringify(errorInfo)}`, type:"fal_stream_error"});
console.error(`[${requestId}] Headers not sent, responding with 500 JSON error.`);
} else if (!res.writableEnded) {
                                    // Headers already sent: emit an error chunk instead
const errorChunk = { id: `chatcmpl-${Date.now()}-error`, object: "chat.completion.chunk", created: Math.floor(Date.now() / 1000), model: model, choices: [{ index: 0, delta: {}, finish_reason: "error", message: { role: 'assistant', content: `Fal Stream Error: ${JSON.stringify(errorInfo)}` } }] };
res.write(`data: ${JSON.stringify(errorChunk)}\n\n`);
console.error(`[${requestId}] Headers sent, sending error chunk.`);
}
break; // Exit the inner `for await...of` loop
}
}
                        // --- First non-error event received successfully ---
if (!keyConfirmedWorking && !errorInfo) {
success = true; // Mark overall success *for this request*
keyConfirmedWorking = true; // Mark this specific key as working
currentFalKeyIndex = (keyIndexToTry + 1) % falKeys.length; // Update global index for next request
console.log(`[${requestId}] Key at index ${keyIndexToTry} confirmed working. Next request starts at index ${currentFalKeyIndex}.`);
if (!res.headersSent) {
res.flushHeaders();
console.log(`[${requestId}] Stream headers flushed.`);
}
firstEventProcessed = true;
}
                        // --- Handle valid data ---
if (!errorInfo) {
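                            // Fal stream events carry the *cumulative* output so far;
                            // derive the incremental delta by stripping the previously
                            // sent prefix (e.g. previous 'Hel', current 'Hello' => delta 'lo').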
let deltaContent = '';
if (currentOutput.startsWith(previousOutput)) {
deltaContent = currentOutput.substring(previousOutput.length);
} else if (currentOutput.length > 0) {
console.warn(`[${requestId}] Fal stream output mismatch detected. Sending full current output as delta.`, { previousLength: previousOutput.length, currentLength: currentOutput.length });
deltaContent = currentOutput;
previousOutput = ''; // Reset previous if mismatch
}
previousOutput = currentOutput;
if (deltaContent || !isPartial) {
const openAIChunk = { id: `chatcmpl-${Date.now()}`, object: "chat.completion.chunk", created: Math.floor(Date.now() / 1000), model: model, choices: [{ index: 0, delta: { content: deltaContent }, finish_reason: isPartial === false ? "stop" : null }] };
if (!res.writableEnded) {
res.write(`data: ${JSON.stringify(openAIChunk)}\n\n`);
}
}
}
} // End `for await...of` loop
                    // --- Post-loop handling ---
if (streamFailedMidway) {
                        // If we broke out due to a mid-stream error, make sure the response is ended
if (!res.writableEnded) {
res.write(`data: [DONE]\n\n`); // Send DONE even after error as per OpenAI spec
res.end();
console.log(`[${requestId}] Stream ended with [DONE] after mid-stream error.`);
}
break; // Exit the outer key retry loop because the stream failed *after* starting
} else if (keyConfirmedWorking) {
                        // The key worked and the loop ended normally (no break)
if (!res.writableEnded) {
res.write(`data: [DONE]\n\n`);
res.end();
console.log(`[${requestId}] Stream finished normally and [DONE] sent.`);
}
break; // Exit the outer key retry loop because we succeeded
}
// If loop finished without confirming the key worked and without mid-stream error (e.g., first event was key error)
// let the outer loop continue to the next key.
} catch (streamProcessingError) {
// This catches errors in the stream processing *logic* itself, less likely
console.error(`[${requestId}] Error during fal stream processing loop logic:`, streamProcessingError);
lastError = streamProcessingError;
if (!res.headersSent) {
res.status(500).json({ object: "error", message: `Proxy Stream Processing Error: ${streamProcessingError.message}`, type:"proxy_internal_error"});
console.error(`[${requestId}] Headers not sent, responding with 500 JSON error for stream logic failure.`);
} else if (!res.writableEnded) {
try {
res.write(`data: ${JSON.stringify({ error: { message: "Proxy Stream processing error", type: "proxy_internal_error", details: streamProcessingError.message } })}\n\n`);
res.write(`data: [DONE]\n\n`);
res.end();
console.error(`[${requestId}] Headers sent, sending error chunk for stream logic failure.`);
} catch (finalError) {
console.error(`[${requestId}] Error sending final error message to client:`, finalError);
if (!res.writableEnded) { res.end(); }
}
}
break; // Exit the outer key retry loop
}
// If we reached here and `success` is true, it means the stream finished successfully.
if (success) {
break; // Exit the outer key retry loop
}
// Otherwise, the stream ended because the first event was a key error, continue the outer loop.
} else {
                // --- Non-streaming (essentially unchanged) ---
console.log(`[${requestId}] Executing non-stream request with key index ${keyIndexToTry}...`);
const result = await fal.subscribe("fal-ai/any-llm", { input: falInput, logs: true });
if (result && result.error) {
console.error(`[${requestId}] Fal-ai returned a business error with key index ${keyIndexToTry}:`, result.error);
lastError = new Error(`Fal-ai error: ${JSON.stringify(result.error)}`);
lastError.status = result.status || 500; // Use status from error if available
lastError.type = "fal_ai_error";
// Business errors (e.g., bad input) usually shouldn't be retried with other keys
break; // Exit retry loop
}
console.log(`[${requestId}] Received non-stream result from fal-ai with key index ${keyIndexToTry}`);
success = true; // Mark overall success
currentFalKeyIndex = (keyIndexToTry + 1) % falKeys.length; // Update global index
console.log(`[${requestId}] Key at index ${keyIndexToTry} successful (non-stream). Next request starts at index ${currentFalKeyIndex}.`);
const openAIResponse = {
id: `chatcmpl-${result.requestId || Date.now()}`, object: "chat.completion", created: Math.floor(Date.now() / 1000), model: model,
choices: [{ index: 0, message: { role: "assistant", content: result.output || "" }, finish_reason: "stop" }],
usage: { prompt_tokens: null, completion_tokens: null, total_tokens: null },
system_fingerprint: null,
...(result.reasoning && { fal_reasoning: result.reasoning }),
};
res.json(openAIResponse);
                break; // success; exit the retry loop
}
} catch (error) {
// This outer catch handles errors from fal.config, fal.stream setup (before first event), fal.subscribe setup
lastError = error;
const status = error?.status;
const errorMessage = error?.body?.detail || error?.message || 'Unknown setup error';
console.warn(`[${requestId}] Attempt ${attempt + 1} with key index ${keyIndexToTry} failed during setup. Status: ${status || 'N/A'}, Message: ${errorMessage}`);
console.error("Setup Error details:", error); // Log full error
// Check for key-related errors during setup
if (status === 401 || status === 403 || status === 429) {
console.log(`[${requestId}] Key-related setup error (${status}). Trying next key...`);
// Continue the outer loop
} else {
// Unrecoverable setup error (e.g., network, internal fal error)
console.error(`[${requestId}] Unrecoverable setup error encountered. Status: ${status || 'N/A'}. Stopping key rotation.`);
break; // Exit the outer key retry loop
}
}
    } // --- End retry loop ---
    // If the loop finished without success (all keys failed or an unrecoverable error occurred)
if (!success) {
console.error(`[${requestId}] All Fal Key attempts failed or an unrecoverable error occurred.`);
if (!res.headersSent) {
const statusCode = lastError?.status || 503; // Use status from last error (could be from setup or first stream event), default 503
const errorMessage = (lastError instanceof Error) ? lastError.message : JSON.stringify(lastError);
const detailMessage = lastError?.body?.detail || errorMessage; // Prefer detailed message
const errorType = lastError?.type || (statusCode === 401 || statusCode === 403 || statusCode === 429 ? "key_error" : "proxy_error");
console.error(`[${requestId}] Sending final error response. Status: ${statusCode}, Type: ${errorType}, Message: ${detailMessage}`);
res.status(statusCode).json({
object: "error",
message: `All Fal Key attempts failed or an unrecoverable error occurred. Last error: ${detailMessage}`,
type: errorType,
param: null,
code: statusCode === 429 ? "rate_limit_exceeded" : (statusCode === 401 || statusCode === 403 ? "invalid_api_key" : "service_unavailable")
});
} else if (!res.writableEnded) {
// This case should be less likely now as stream errors are handled inside the loop
console.error(`[${requestId}] Headers potentially sent, but request failed. Attempting to end stream.`);
try {
// Don't send another error chunk if one might have been sent already
res.write(`data: [DONE]\n\n`);
res.end();
} catch (e) {
console.error(`[${requestId}] Failed to write final [DONE] to stream:`, e);
if (!res.writableEnded) res.end();
}
} else {
console.error(`[${requestId}] Request failed, but response stream was already fully ended. Cannot send error.`);
}
}
});
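// Example of the SSE frames a streaming client receives from this endpoint
// (ids, timestamps, and content are illustrative):
//   data: {"id":"chatcmpl-1700000000000","object":"chat.completion.chunk","created":1700000000,"model":"openai/gpt-4o","choices":[{"index":0,"delta":{"content":"Hel"},"finish_reason":null}]}
//   data: {"id":"chatcmpl-1700000000001","object":"chat.completion.chunk","created":1700000000,"model":"openai/gpt-4o","choices":[{"index":0,"delta":{"content":"lo"},"finish_reason":"stop"}]}
//   data: [DONE]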
// Start the server (updated startup banner)
app.listen(PORT, () => {
console.log(`===================================================`);
    console.log(` Fal OpenAI Proxy Server (Key Rotation with Retry v2 + System Top + Separator + Recency)`); // updated description
console.log(` Listening on port: ${PORT}`);
console.log(` Loaded ${falKeys.length} Fal AI Keys for rotation.`);
console.log(` Using Limits: System Prompt=${SYSTEM_PROMPT_LIMIT}, Prompt=${PROMPT_LIMIT}`);
console.log(` API Key Auth Enabled: ${API_KEY ? 'Yes' : 'No'}`);
console.log(` Chat Completions Endpoint: POST http://localhost:${PORT}/v1/chat/completions`);
console.log(` Models Endpoint: GET http://localhost:${PORT}/v1/models`);
console.log(`===================================================`);
});
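// Typical startup (all values hypothetical):
//   FAL_KEY="fal-key-aaaa,fal-key-bbbb" API_KEY="my-proxy-key" PORT=3000 node server.js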
// Root path response (updated message)
app.get('/', (req, res) => {
    res.send('Fal OpenAI Proxy (Key Rotation with Retry v2 + System Top + Separator + Recency Strategy) is running.'); // updated description
});