/*
 * test-g / index.js
 * (Residue from the Hugging Face file-viewer page — "yxmiler's picture",
 *  "Update index.js", commit 69c7de2 verified, raw / history / blame,
 *  49.8 kB — kept here as a comment so the file parses as JavaScript.)
 */
import express from 'express';
import fetch from 'node-fetch';
import dotenv from 'dotenv';
import { v4 as uuidv4 } from 'uuid';
import cors from 'cors';
// 初始化环境变量
dotenv.config();
/**
* 配置管理类
*/
class Config {
constructor() {
this.initializeApiKeys();
this.initializeAuth();
// 主API Key池 (可用于新请求)
this.apiKeys = []; // This will be filled by initializeApiKeys
// 已使用的API Key池 (成功请求后放入,用于循环使用)
this.usedApiKeys = [];
// 失效的API Key池 (永久失效,不再使用)
this.invalidApiKeys = [];
// Gemini安全设置
this.geminiSafety = [
{
category: 'HARM_CATEGORY_HARASSMENT',
threshold: 'OFF',
},
{
category: 'HARM_CATEGORY_HATE_SPEECH',
threshold: 'OFF',
},
{
category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
threshold: 'OFF',
},
{
category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
threshold: 'OFF',
},
{
category: 'HARM_CATEGORY_CIVIC_INTEGRITY',
threshold: 'OFF',
},
];
}
/**
* 初始化API Keys
*/
initializeApiKeys() {
const apiKeysEnv = process.env.GEMINI_API_KEYS;
if (!apiKeysEnv) {
console.error('❌ 错误: 未找到 GEMINI_API_KEYS 环境变量');
process.exit(1);
}
// 通过换行符分割API Keys
this.apiKeys = apiKeysEnv
.split('\n')
.map(key => key.trim())
.filter(key => key.length > 0);
if (this.apiKeys.length === 0) {
console.error('❌ 错误: 没有找到有效的API Keys');
process.exit(1);
}
console.log(`✅ 成功加载 ${this.apiKeys.length} 个API Keys`);
}
/**
* 初始化认证配置
*/
initializeAuth() {
this.authToken = process.env.AUTH_TOKEN || 'sk-123456';
}
/**
* 获取可用的API Key(负载均衡)
* 只负责从主池获取一个key,不负责移动到used池
*/
getApiKeyForRequest() {
if (this.apiKeys.length === 0) {
if (this.usedApiKeys.length > 0) {
// 如果主池为空,则从已使用池回收key
this.apiKeys.push(...this.usedApiKeys);
this.usedApiKeys = [];
console.log('🔄 回收已使用的API Keys到主池。');
} else {
return null; // 没有可用的key
}
}
return this.apiKeys.shift(); // 从主池取出第一个key
}
/**
* 获取第一个可用的API Key(用于模型列表请求等非核心请求)
*/
getFirstAvailableApiKey() {
if (this.apiKeys.length > 0) {
return this.apiKeys[0];
}
if (this.usedApiKeys.length > 0) {
return this.usedApiKeys[0];
}
return null;
}
/**
* 将API Key标记为失效
*/
markKeyAsInvalid(apiKey) {
// 从所有池中移除
this.apiKeys = this.apiKeys.filter(key => key !== apiKey);
this.usedApiKeys = this.usedApiKeys.filter(key => key !== apiKey);
// 添加到失效池 (如果不在里面)
if (apiKey && !this.invalidApiKeys.includes(apiKey)) {
this.invalidApiKeys.push(apiKey);
}
console.warn(`⚠️ API Key 已标记为失效: ${apiKey ? apiKey.substring(0, 10) : 'N/A'}...`);
console.warn(`当前可用API Keys: ${this.apiKeys.length}, 已使用: ${this.usedApiKeys.length}, 失效: ${this.invalidApiKeys.length}`);
}
/**
* 将API Key返回到已使用池 (表示成功使用过,可以循环)
*/
returnApiKeyToUsed(apiKey) {
if (apiKey && !this.usedApiKeys.includes(apiKey) && !this.invalidApiKeys.includes(apiKey)) {
this.usedApiKeys.push(apiKey);
}
}
/**
* 将API Key返回到主池 (例如,遇到暂时性错误如429,放回队列末尾)
*/
returnApiKeyToMain(apiKey) {
if (apiKey && !this.apiKeys.includes(apiKey) && !this.invalidApiKeys.includes(apiKey)) {
this.apiKeys.push(apiKey);
}
}
/**
* 验证授权头
*/
validateAuth(authHeader) {
if (!authHeader) {
return false;
}
const token = authHeader.replace('Bearer ', '');
return token === this.authToken;
}
}
/**
* 图片处理器类
*/
class ImageProcessor {
/**
* 从data URL中提取MIME类型和base64数据
*/
static parseDataUrl(dataUrl) {
try {
// 匹配data:image/jpeg;base64,<base64data>格式
const match = dataUrl.match(/^data:([^;]+);base64,(.+)$/);
if (!match) {
throw new Error('无效的data URL格式');
}
const mimeType = match[1];
const base64Data = match[2];
// 验证MIME类型是否为支持的图片格式
const supportedMimeTypes = [
'image/jpeg',
'image/jpg',
'image/png',
'image/gif',
'image/webp',
'image/bmp',
'image/tiff'
];
if (!supportedMimeTypes.includes(mimeType.toLowerCase())) {
throw new Error(`不支持的图片格式: ${mimeType}`);
}
return {
mimeType,
data: base64Data
};
} catch (error) {
console.error('解析图片data URL错误:', error);
throw error;
}
}
/**
* 验证base64数据是否有效
*/
static validateBase64(base64String) {
try {
// 基本格式检查
if (!/^[A-Za-z0-9+/]*={0,2}$/.test(base64String)) {
return false;
}
// 长度检查(base64编码长度应该是4的倍数)
return base64String.length % 4 === 0;
} catch (error) {
return false;
}
}
/**
* 从URL下载图片并转换为base64
*/
static async fetchImageAsBase64(imageUrl) {
try {
const response = await fetch(imageUrl, {
timeout: 30000, // 30秒超时
headers: {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
}
});
if (!response.ok) {
throw new Error(`获取图片失败: HTTP ${response.status}`);
}
const contentType = response.headers.get('content-type');
if (!contentType || !contentType.startsWith('image/')) {
throw new Error(`URL返回的不是图片类型: ${contentType}`);
}
const buffer = await response.buffer();
const base64Data = buffer.toString('base64');
return {
mimeType: contentType,
data: base64Data
};
} catch (error) {
console.error('下载图片错误:', error);
throw error;
}
}
}
/**
* 消息转换器类(增强版)
*/
class MessageConverter {
/**
* 将OpenAI格式的消息转换为Gemini格式(支持图片)
*/
static async convertMessages(openaiMessages) {
const geminiMessages = [];
let currentRole = null;
let currentParts = [];
for (const message of openaiMessages) {
let role = message.role;
let content = message.content;
// 角色转换
if (role === 'system') {
// Gemini doesn't have a direct 'system' role. It's usually integrated into the first 'user' turn.
// For simplicity, we convert 'system' to 'user'.
role = 'user';
}
if (role === 'assistant') {
role = 'model';
}
// 处理内容
let parts = [];
if (typeof content === 'string') {
// 简单文本消息
parts = [{ text: content }];
} else if (Array.isArray(content)) {
// 多模态消息(包含文本和图片)
parts = await this.convertContentArray(content);
} else {
// 其他格式,转为文本
console.warn(`未知的内容格式: ${typeof content},将转为文本处理`);
parts = [{ text: String(content) }];
}
// 合并相同角色的连续消息以符合Gemini的单轮对话结构
// Gemini API expects alternating user/model turns (e.g., user, model, user, model).
// If OpenAI messages have consecutive 'user' or 'assistant' roles,
// we should merge them into a single Gemini turn.
if (role === currentRole) {
currentParts.push(...parts);
} else {
// 保存上一个角色的消息
if (currentRole !== null && currentParts.length > 0) {
geminiMessages.push({
role: currentRole,
parts: currentParts
});
}
// 开始新角色
currentRole = role;
currentParts = [...parts];
}
}
// 添加最后一个消息
if (currentRole !== null && currentParts.length > 0) {
geminiMessages.push({
role: currentRole,
parts: currentParts
});
}
return geminiMessages;
}
/**
* 转换OpenAI的content数组为Gemini的parts格式
*/
static async convertContentArray(contentArray) {
const parts = [];
for (const item of contentArray) {
try {
if (item.type === 'text') {
// 文本内容
parts.push({ text: item.text || '' });
} else if (item.type === 'image_url') {
// 图片内容
const imagePart = await this.convertImageContent(item);
if (imagePart) {
parts.push(imagePart);
}
} else {
// 其他类型,尝试转为文本
console.warn(`未知的内容类型: ${item.type},将转为文本处理`);
parts.push({ text: JSON.stringify(item) });
}
} catch (error) {
console.error('转换内容项错误:', error);
// 出错时返回错误信息文本,而不是抛出异常,避免整个请求失败
parts.push({ text: `[内容项处理失败: ${error.message}]` });
}
}
return parts;
}
/**
* 转换图片内容为Gemini格式
*/
static async convertImageContent(imageItem) {
try {
const imageUrl = imageItem.image_url?.url;
if (!imageUrl) {
throw new Error('缺少图片URL');
}
let imageData;
if (imageUrl.startsWith('data:')) {
// 处理base64数据URL
imageData = ImageProcessor.parseDataUrl(imageUrl);
// 验证base64数据
if (!ImageProcessor.validateBase64(imageData.data)) {
throw new Error('无效的base64图片数据');
}
} else if (imageUrl.startsWith('http://') || imageUrl.startsWith('https://')) {
// 处理网络图片URL
imageData = await ImageProcessor.fetchImageAsBase64(imageUrl);
} else {
throw new Error(`不支持的图片URL格式: ${imageUrl}`);
}
// 返回Gemini格式的图片数据
return {
inlineData: {
mimeType: imageData.mimeType,
data: imageData.data
}
};
} catch (error) {
console.error('转换图片内容错误:', error);
// 返回错误信息文本,而不是抛出异常
return { text: `[图片处理失败: ${error.message}]` };
}
}
/**
* 从OpenAI请求中提取参数
*/
static extractParams(openaiRequest) {
return {
model: openaiRequest.model || 'gemini-1.5-flash',
messages: openaiRequest.messages || [],
stream: openaiRequest.stream || false,
temperature: openaiRequest.temperature,
maxTokens: openaiRequest.max_tokens,
topP: openaiRequest.top_p
};
}
}
/**
* 模型管理类
*/
class ModelManager {
constructor(config) {
this.config = config;
this.cachedModels = null;
this.cacheExpiry = null;
this.cacheTimeout = 5 * 60 * 1000; // 5分钟缓存
}
/**
* 获取模型列表
*/
async getModels() {
if (this.cachedModels && this.cacheExpiry && Date.now() < this.cacheExpiry) {
return { success: true, data: this.cachedModels };
}
const apiKey = this.config.getFirstAvailableApiKey();
if (!apiKey) {
return {
success: false,
error: '没有可用的API Key',
status: 503
};
}
try {
const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models?key=${apiKey}`, {
method: 'GET',
headers: {
'Content-Type': 'application/json'
}
});
if (!response.ok) {
// 如果是 403 错误,标记 key 失效
if (response.status === 403) {
this.config.markKeyAsInvalid(apiKey);
}
return {
success: false,
error: `获取模型列表失败: ${response.status}`,
status: response.status
};
}
const geminiResponse = await response.json();
const filteredModels = this.filterModels(geminiResponse.models || []);
this.cachedModels = filteredModels;
this.cacheExpiry = Date.now() + this.cacheTimeout;
return { success: true, data: filteredModels };
} catch (error) {
console.error('获取模型列表错误:', error);
return {
success: false,
error: '网络请求失败',
status: 500
};
}
}
/**
* 过滤模型 - 保持原始Gemini模型名,并增加视觉模型支持
*/
filterModels(models) {
const allowedPrefixes = [
'models/gemini-2.5-flash',
'models/gemini-2.0-flash',
'models/gemini-1.5-flash',
'models/gemini-1.5-pro' // 允许 Gemini 1.5 Pro 模型
];
const excludedModels = [
'models/gemini-1.5-flash-8b' // 排除特定的较小模型
];
const filteredModels = models.filter(model => {
const modelName = model.name;
// 排除特定模型
if (excludedModels.some(excluded => modelName.startsWith(excluded))) {
return false;
}
// 允许特定的完整模型名 (例如, "models/gemini-2.5-pro" 不以任何前缀开始)
if (modelName === "models/gemini-2.5-pro" || modelName === "models/gemini-1.5-pro") {
return true;
}
// 允许以特定前缀开头的模型
return allowedPrefixes.some(prefix => modelName.startsWith(prefix));
});
// 转换为OpenAI格式但保持Gemini模型名
const processedModels = filteredModels.map(model => {
const modelId = model.name.replace('models/', '');
return {
id: modelId, // 直接使用Gemini模型名
object: 'model',
created: Math.floor(Date.now() / 1000),
owned_by: 'google',
permission: [
{
id: `modelperm-${modelId}`,
object: 'model_permission',
created: Math.floor(Date.now() / 1000),
allow_create_engine: false,
allow_sampling: true,
allow_logprobs: false,
allow_search_indices: false,
allow_view: true,
allow_fine_tuning: false,
organization: '*',
group: null,
is_blocking: false
}
],
root: modelId,
parent: null
};
});
return {
object: 'list',
data: processedModels
};
}
}
/**
* Gemini API 请求构建器类
*/
class GeminiRequestBuilder {
constructor(config) {
this.config = config;
}
/**
* 构建Gemini API请求体
*/
buildRequestBody(geminiMessages, params) {
const requestBody = {
contents: geminiMessages,
safetySettings: this.config.geminiSafety,
generationConfig: {}
};
if (params.temperature !== undefined) {
requestBody.generationConfig.temperature = params.temperature;
}
if (params.maxTokens !== undefined) {
requestBody.generationConfig.maxOutputTokens = params.maxTokens;
}
if (params.topP !== undefined) {
requestBody.generationConfig.topP = params.topP;
}
return requestBody;
}
/**
* 构建Gemini API URL
*/
buildApiUrl(model, apiKey, isStream = false) {
const method = isStream ? 'streamGenerateContent' : 'generateContent';
return `https://generativelanguage.googleapis.com/v1beta/models/${model}:${method}?key=${apiKey}`;
}
}
/**
* 响应转换器类
*/
class ResponseConverter {
/**
* 将Gemini流式响应块转换为OpenAI格式
*/
static convertStreamChunk(geminiData, requestId, model) {
try {
if (geminiData.candidates && geminiData.candidates[0]) {
const candidate = geminiData.candidates[0];
if (candidate.content && candidate.content.parts) {
const text = candidate.content.parts[0]?.text || '';
const openaiChunk = {
id: requestId,
object: 'chat.completion.chunk',
created: Math.floor(Date.now() / 1000),
model: model, // 使用当前请求的模型名
choices: [{
index: 0,
delta: { content: text },
finish_reason: candidate.finishReason === 'STOP' ? 'stop' : null
}]
};
return `data: ${JSON.stringify(openaiChunk)}\n\n`;
}
}
// 处理安全设置阻断等情况
if (geminiData.promptFeedback?.blockReason || geminiData.candidates?.[0]?.finishReason === 'SAFETY') {
console.warn('Gemini stream blocked:', geminiData.promptFeedback?.blockReason || 'SAFETY');
const openaiChunk = {
id: requestId,
object: 'chat.completion.chunk',
created: Math.floor(Date.now() / 1000),
model: model,
choices: [{
index: 0,
delta: { content: `[Content blocked due to: ${geminiData.promptFeedback?.blockReason || 'SAFETY'}]` },
finish_reason: 'content_filter'
}]
};
return `data: ${JSON.stringify(openaiChunk)}\n\n`;
}
return '';
} catch (error) {
console.error('转换流响应块错误:', error);
return '';
}
}
/**
* 将Gemini非流式响应转换为OpenAI格式
*/
static convertNormalResponse(geminiResponse, requestId, model) {
const openaiResponse = {
id: requestId,
object: 'chat.completion',
created: Math.floor(Date.now() / 1000),
model: model, // 使用当前请求的模型名
choices: [],
usage: {
prompt_tokens: 0,
completion_tokens: 0,
total_tokens: 0
}
};
if (geminiResponse.candidates && geminiResponse.candidates[0]) {
const candidate = geminiResponse.candidates[0];
if (candidate.content && candidate.content.parts) {
const text = candidate.content.parts.map(part => part.text).join('');
openaiResponse.choices.push({
index: 0,
message: {
role: 'assistant',
content: text
},
finish_reason: candidate.finishReason === 'STOP' ? 'stop' : 'length' // Gemini 'STOP' is stop, others often length
});
} else if (candidate.finishReason === 'SAFETY') {
// 如果有内容但被安全机制阻断
openaiResponse.choices.push({
index: 0,
message: {
role: 'assistant',
content: `Content generation was blocked due to safety policy.`
},
finish_reason: 'content_filter'
});
}
} else if (geminiResponse.promptFeedback && geminiResponse.promptFeedback.blockReason) {
// Handle content moderation blocks (even before candidate generation)
console.warn('Gemini response blocked:', geminiResponse.promptFeedback.blockReason);
openaiResponse.choices.push({
index: 0,
message: {
role: 'assistant',
content: `Content generation was blocked due to: ${geminiResponse.promptFeedback.blockReason}.`
},
finish_reason: 'content_filter'
});
}
// 尝试从usage信息中获取token使用量
if (geminiResponse.usageMetadata) {
openaiResponse.usage = {
prompt_tokens: geminiResponse.usageMetadata.promptTokenCount || 0,
completion_tokens: geminiResponse.usageMetadata.candidatesTokenCount || 0,
total_tokens: geminiResponse.usageMetadata.totalTokenCount || 0
};
}
return openaiResponse;
}
/**
* 将文本内容分块为假流式响应的生成器
*/
static * chunkTextForFakeStream(text, requestId, model, chunkSize = 50) {
if (!text) {
yield `data: ${JSON.stringify({
id: requestId,
object: 'chat.completion.chunk',
created: Math.floor(Date.now() / 1000),
model: model,
choices: [{ index: 0, delta: { content: '' }, finish_reason: 'stop' }]
})}\n\n`;
return;
}
let offset = 0;
while (offset < text.length) {
const chunk = text.substring(offset, Math.min(offset + chunkSize, text.length));
offset += chunkSize;
const openaiChunk = {
id: requestId,
object: 'chat.completion.chunk',
created: Math.floor(Date.now() / 1000),
model: model,
choices: [{
index: 0,
delta: { content: chunk },
finish_reason: null // Not yet finished
}]
};
yield `data: ${JSON.stringify(openaiChunk)}\n\n`;
}
// Final chunk with finish_reason
const finalChunk = {
id: requestId,
object: 'chat.completion.chunk',
created: Math.floor(Date.now() / 1000),
model: model,
choices: [{
index: 0,
delta: {}, // Delta for final chunk might be empty
finish_reason: 'stop' // Indicate end of stream
}]
};
yield `data: ${JSON.stringify(finalChunk)}\n\n`;
}
}
/**
* Gemini实时流式响应解析器
*/
class GeminiRealtimeStreamParser {
constructor(response, onChunk) {
this.response = response;
this.onChunk = onChunk;
this.buffer = '';
this.bufferLv = 0; // Tracks JSON object nesting level
this.inString = false;
this.escapeNext = false;
this.decoder = new TextDecoder();
}
async start() {
try {
const reader = this.response.body.getReader();
while (true) {
const { done, value } = await reader.read();
if (done) break;
const text = this.decoder.decode(value, { stream: true });
await this.processText(text);
}
// Handle any remaining buffer after stream ends
await this.handleRemainingBuffer();
} catch (error) {
console.error('流式解析错误:', error);
throw error;
}
}
async processText(text) {
for (const char of text) {
// Handle escaped characters
if (this.escapeNext) {
this.buffer += char;
this.escapeNext = false;
continue;
}
// Toggle inString state
if (char === '"') {
this.inString = !this.inString;
} else if (char === '\\' && this.inString) {
// Handle escape character itself within a string
this.escapeNext = true;
this.buffer += char; // Add the backslash to buffer
continue;
}
// Track JSON nesting level outside of strings
if (!this.inString) {
if (char === '{' || char === '[') {
this.bufferLv++;
} else if (char === '}' || char === ']') {
this.bufferLv--;
}
}
this.buffer += char; // Always add char to buffer
// If we are at the top level of a complete JSON object (bufferLv === 0 for a root object)
// For Gemini's stream format, each object typically ends with } and is a standalone chunk
if (!this.inString && this.bufferLv === 0 && char === '}' && this.buffer.trim() !== '') {
try {
const bufferJson = JSON.parse(this.buffer);
await this.onChunk(bufferJson);
} catch (parseError) {
console.error('解析Gemini流数据错误:', parseError);
// It's possible to get partial JSON or errors, log and clear buffer
}
this.buffer = ''; // Reset buffer after processing a complete object
}
}
}
async handleRemainingBuffer() {
if (this.buffer.trim() !== '') {
try {
// Attempt to parse any remaining content as a full JSON object.
const bufferJson = JSON.parse(this.buffer);
await this.onChunk(bufferJson);
} catch (parseError) {
console.error('解析最后的缓冲区数据错误:', parseError);
}
}
}
}
/**
* 认证中间件
*/
class AuthMiddleware {
constructor(config) {
this.config = config;
}
middleware() {
return (req, res, next) => {
// 跳过健康检查和预检请求
if (req.path === '/health' || req.method === 'OPTIONS') {
return next();
}
const authHeader = req.headers.authorization;
if (!this.config.validateAuth(authHeader)) {
return res.status(401).json({
error: {
message: 'Invalid authentication credentials',
type: 'invalid_request_error',
code: 'invalid_api_key'
}
});
}
next();
};
}
}
/**
* API代理服务类
*/
class ApiProxyService {
constructor() {
this.config = new Config();
this.requestBuilder = new GeminiRequestBuilder(this.config);
this.modelManager = new ModelManager(this.config);
this.authMiddleware = new AuthMiddleware(this.config);
}
/**
* 处理聊天API请求(支持图片)- 真实流式
*/
async handleChatRequest(req, res) {
try {
const requestId = `chatcmpl-${uuidv4()}`;
const params = MessageConverter.extractParams(req.body);
// 异步转换消息(支持图片处理)
const geminiMessages = await MessageConverter.convertMessages(params.messages);
if (!geminiMessages || geminiMessages.length === 0) {
return res.status(400).json({
error: {
message: '无效的消息格式或消息为空',
type: 'invalid_request_error',
code: 'invalid_messages'
}
});
}
const requestBody = this.requestBuilder.buildRequestBody(geminiMessages, params);
if (params.stream) {
const result = await this.executeRealStreamRequest(requestBody, params, requestId, res);
if (!result.success) {
res.status(result.status || 500).json({ error: result.error });
}
} else {
const result = await this.executeNormalRequest(requestBody, params, requestId);
if (result.success) {
res.json(result.data);
} else {
res.status(result.status || 500).json({ error: result.error });
}
}
} catch (error) {
console.error('处理聊天请求错误:', error);
res.status(500).json({
error: {
message: '内部服务器错误: ' + error.message,
type: 'internal_server_error',
code: 'server_error'
}
});
}
}
/**
* 新增处理假流式聊天API请求
* 如果客户端请求流式,则内部调用非流式 Gemini API,然后转为假流发送给客户端。
* 如果客户端请求非流式,则直接代理非流式请求。
*/
async handleFakeStreamChatRequest(req, res) {
try {
const requestId = `chatcmpl-${uuidv4()}`;
const params = MessageConverter.extractParams(req.body);
const geminiMessages = await MessageConverter.convertMessages(params.messages);
if (!geminiMessages || geminiMessages.length === 0) {
return res.status(400).json({
error: {
message: '无效的消息格式或消息为空',
type: 'invalid_request_error',
code: 'invalid_messages'
}
});
}
const requestBody = this.requestBuilder.buildRequestBody(geminiMessages, params);
if (params.stream) {
// 客户端请求流式,但我们内部执行非流式 Gemini 请求,然后假流返回
const result = await this.executeFakeStreamResponse(requestBody, params, requestId, res);
if (!result.success) {
res.status(result.status || 500).json({ error: result.error });
}
} else {
// 客户端请求非流式,直接执行非流式请求
const result = await this.executeNormalRequest(requestBody, params, requestId);
if (result.success) {
res.json(result.data);
} else {
res.status(result.status || 500).json({ error: result.error });
}
}
} catch (error) {
console.error('处理假流式聊天请求错误:', error);
res.status(500).json({
error: {
message: '内部服务器错误: ' + error.message,
type: 'internal_server_error',
code: 'server_error'
}
});
}
}
/**
* 执行真实的流式请求
*/
async executeRealStreamRequest(requestBody, params, requestId, res, retryCount = 0) {
const maxRetries = 3;
let apiKey = this.config.getApiKeyForRequest();
if (!apiKey) {
return { success: false, error: '目前暂无可用的API Key', status: 503 };
}
try {
const apiUrl = this.requestBuilder.buildApiUrl(params.model, apiKey, true);
const response = await fetch(apiUrl, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(requestBody)
});
if (response.status === 403) {
this.config.markKeyAsInvalid(apiKey);
if (retryCount < maxRetries) {
console.log(`真实流式请求重试 ${retryCount + 1}/${maxRetries} (403错误)。`);
return await this.executeRealStreamRequest(requestBody, params, requestId, res, retryCount + 1);
}
return { success: false, error: 'API Key 无效或已失效', status: 403 };
}
if (response.status === 429) {
this.config.returnApiKeyToMain(apiKey); // 放回主池末尾,等待冷却
if (retryCount < maxRetries) {
console.log(`真实流式请求重试 ${retryCount + 1}/${maxRetries} (429错误)。`);
return await this.executeRealStreamRequest(requestBody, params, requestId, res, retryCount + 1);
}
return { success: false, error: '请求频率过高,请稍后重试', status: 429 };
}
if (response.status >= 500 && response.status < 600) { // 5xx errors
this.config.returnApiKeyToMain(apiKey); // 服务器错误,放回主池
if (retryCount < maxRetries) {
console.log(`真实流式请求重试 ${retryCount + 1}/${maxRetries} (${response.status}错误)。`);
return await this.executeRealStreamRequest(requestBody, params, requestId, res, retryCount + 1);
}
return { success: false, error: '目前服务器繁忙,请稍后重试', status: response.status };
}
if (!response.ok) {
const errorText = await response.text();
console.error(`API请求失败: ${response.status}, 错误信息: ${errorText}`);
this.config.returnApiKeyToMain(apiKey); // 其他非 2xx 错误,也放回主池
return { success: false, error: `API请求失败: ${response.status} - ${errorText.substring(0, 100)}`, status: response.status };
}
// 如果请求成功,将 key 放入已使用池
this.config.returnApiKeyToUsed(apiKey);
res.writeHead(200, {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Access-Control-Allow-Origin': '*'
});
const parser = new GeminiRealtimeStreamParser(response, async (geminiData) => {
const convertedChunk = ResponseConverter.convertStreamChunk(geminiData, requestId, params.model);
if (convertedChunk) {
res.write(convertedChunk);
}
});
await parser.start();
res.write('data: [DONE]\n\n');
res.end();
return { success: true };
} catch (error) {
console.error('执行流式请求错误:', error);
this.config.returnApiKeyToMain(apiKey); // 网络错误,放回主池
if (retryCount < maxRetries) {
console.log(`真实流式请求重试 ${retryCount + 1}/${maxRetries} (网络错误)。`);
return await this.executeRealStreamRequest(requestBody, params, requestId, res, retryCount + 1);
}
return { success: false, error: '网络请求失败: ' + error.message, status: 500 };
}
}
/**
* 执行非流式请求
*/
async executeNormalRequest(requestBody, params, requestId, retryCount = 0) {
const maxRetries = 3;
let apiKey = this.config.getApiKeyForRequest();
if (!apiKey) {
return { success: false, error: '目前暂无可用的API Key', status: 503 };
}
try {
const apiUrl = this.requestBuilder.buildApiUrl(params.model, apiKey, false);
const response = await fetch(apiUrl, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(requestBody)
});
if (response.status === 403) {
this.config.markKeyAsInvalid(apiKey);
if (retryCount < maxRetries) {
console.log(`非流式请求重试 ${retryCount + 1}/${maxRetries} (403错误)。`);
return await this.executeNormalRequest(requestBody, params, requestId, retryCount + 1);
}
return { success: false, error: 'API Key 无效或已失效', status: 403 };
}
if (response.status === 429) {
this.config.returnApiKeyToMain(apiKey);
if (retryCount < maxRetries) {
console.log(`非流式请求重试 ${retryCount + 1}/${maxRetries} (429错误)。`);
return await this.executeNormalRequest(requestBody, params, requestId, retryCount + 1);
}
return { success: false, error: '请求频率过高,请稍后重试', status: 429 };
}
if (response.status >= 500 && response.status < 600) { // 5xx errors
this.config.returnApiKeyToMain(apiKey);
if (retryCount < maxRetries) {
console.log(`非流式请求重试 ${retryCount + 1}/${maxRetries} (${response.status}错误)。`);
return await this.executeNormalRequest(requestBody, params, requestId, retryCount + 1);
}
return { success: false, error: '目前服务器繁忙,请稍后重试', status: response.status };
}
if (!response.ok) {
const errorText = await response.text();
console.error(`API请求失败: ${response.status}, 错误信息: ${errorText}`);
this.config.returnApiKeyToMain(apiKey);
return { success: false, error: `API请求失败: ${response.status} - ${errorText.substring(0, 100)}`, status: response.status };
}
this.config.returnApiKeyToUsed(apiKey);
const geminiResponse = await response.json();
const openaiResponse = ResponseConverter.convertNormalResponse(geminiResponse, requestId, params.model);
return { success: true, data: openaiResponse };
} catch (error) {
console.error('执行非流式请求错误:', error);
this.config.returnApiKeyToMain(apiKey);
if (retryCount < maxRetries) {
console.log(`非流式请求重试 ${retryCount + 1}/${maxRetries} (网络错误)。`);
return await this.executeNormalRequest(requestBody, params, requestId, retryCount + 1);
}
return { success: false, error: '网络请求失败: ' + error.message, status: 500 };
}
}
/**
* 执行假流式响应 (内部调用非流式 Gemini API, 然后转换为流式响应)
*/
async executeFakeStreamResponse(requestBody, params, requestId, res) {
let pingInterval;
const pingDelayMs = 1000; // 每1秒发送一次ping消息
try {
res.writeHead(200, {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Access-Control-Allow-Origin': '*'
});
// 开始发送ping消息
pingInterval = setInterval(() => {
const pingChunk = {
id: requestId,
object: 'chat.completion.chunk',
created: Math.floor(Date.now() / 1000),
model: params.model,
choices: [{
index: 0,
delta: {}, // 空 delta 用于 ping
finish_reason: null
}]
};
// console.log('发送 ping 块...');
res.write(`data: ${JSON.stringify(pingChunk)}\n\n`);
}, pingDelayMs);
// 执行实际的非流式请求到 Gemini
const result = await this.executeNormalRequest(requestBody, params, requestId);
// 请求完成后立即清除 ping 间隔
clearInterval(pingInterval);
pingInterval = null;
if (result.success) {
const fullContent = result.data.choices[0]?.message?.content || '';
// 将完整内容分块并以假流形式发送
for (const chunk of ResponseConverter.chunkTextForFakeStream(fullContent, requestId, params.model)) {
res.write(chunk);
// 可以添加一个小的延迟来模拟更真实的“流式”体验
await new Promise(resolve => setTimeout(resolve, 50));
}
res.write('data: [DONE]\n\n');
res.end();
return { success: true };
} else {
// 如果非流式请求失败,发送错误块并结束流
res.write(`data: ${JSON.stringify({ error: result.error })}\n\n`);
res.write('data: [DONE]\n\n');
res.end();
return { success: false, error: result.error, status: result.status };
}
} catch (error) {
console.error('执行假流式请求错误:', error);
if (pingInterval) {
clearInterval(pingInterval);
}
// 如果在响应发送前或发送过程中出现错误
res.status(500).json({
error: {
message: '内部服务器错误: ' + error.message,
type: 'internal_server_error',
code: 'server_error'
}
});
return { success: false, error: '内部服务器错误: ' + error.message, status: 500 };
}
}
/**
* 处理模型列表请求
*/
async handleModelsRequest(req, res) {
try {
const result = await this.modelManager.getModels();
if (result.success) {
res.json(result.data);
} else {
res.status(result.status || 500).json({ error: result.error });
}
} catch (error) {
console.error('处理模型列表请求错误:', error);
res.status(500).json({ error: '内部服务器错误' });
}
}
}
/**
* Express 服务器
*/
class Server {
constructor() {
this.app = express();
this.apiProxy = new ApiProxyService();
this.setupMiddleware();
this.setupRoutes();
}
setupMiddleware() {
// CORS配置
this.app.use(cors({
origin: '*',
credentials: true,
optionsSuccessStatus: 200
}));
// JSON解析 - 增加大小限制以支持图片
this.app.use(express.json({ limit: '50mb' }));
this.app.use(express.urlencoded({ limit: '50mb', extended: true }));
// 认证中间件
this.app.use(this.apiProxy.authMiddleware.middleware());
// 请求日志中间件
this.app.use((req, res, next) => {
const start = Date.now();
res.on('finish', () => {
const duration = Date.now() - start;
console.log(`${req.method} ${req.path} - ${res.statusCode} [${duration}ms]`);
});
next();
});
}
setupRoutes() {
// 聊天接口(支持图片,真实流式)
this.app.post('/v1/chat/completions', (req, res) => {
this.apiProxy.handleChatRequest(req, res);
});
// 新增聊天接口(支持图片,假流式)
this.app.post('/fakestream/v1/chat/completions', (req, res) => {
this.apiProxy.handleFakeStreamChatRequest(req, res);
});
// 模型列表接口
this.app.get('/v1/models', (req, res) => {
this.apiProxy.handleModelsRequest(req, res);
});
// 健康检查接口
this.app.get('/health', (req, res) => {
res.json({
status: 'healthy',
timestamp: new Date().toISOString(),
availableKeys: this.apiProxy.config.apiKeys.length, // 可用于新请求的key
usedKeys: this.apiProxy.config.usedApiKeys.length, // 成功使用过的key (待下次循环)
invalidKeys: this.apiProxy.config.invalidApiKeys.length, // 已失效的key
totalKeysConfigured: this.apiProxy.config.apiKeys.length + this.apiProxy.config.usedApiKeys.length + this.apiProxy.config.invalidApiKeys.length,
version: '2.1.0', // 更新版本号以反映新功能
features: ['text', 'vision', 'stream_real', 'stream_fake', 'load_balancing']
});
});
// 404处理
this.app.use('*', (req, res) => {
res.status(404).json({
error: {
message: 'Not Found',
type: 'invalid_request_error',
code: 'not_found'
}
});
});
// 全局错误处理
this.app.use((err, req, res, next) => {
console.error('服务器错误:', err);
res.status(500).json({
error: {
message: '内部服务器错误',
type: 'internal_server_error',
code: 'server_error'
}
});
});
}
start(port = 3000) {
this.app.listen(port, () => {
console.log(`🚀 OpenAI to Gemini Proxy Server (Enhanced) 启动在端口 ${port}`);
console.log(`📍 真实流式聊天API: http://localhost:${port}/v1/chat/completions`);
console.log(`📍 假流式聊天API (新功能): http://localhost:${port}/v1/chat/completions/fakestream`);
console.log(`📋 模型列表: http://localhost:${port}/v1/models`);
console.log(`🔍 健康检查: http://localhost:${port}/health`);
});
}
}
// 启动服务器
const server = new Server();
const port = process.env.PORT || 3000;
server.start(port);