Update index.js
Browse files
index.js
CHANGED
@@ -12,16 +12,13 @@ dotenv.config();
|
|
12 |
*/
|
13 |
class Config {
|
14 |
constructor() {
|
15 |
-
// 主API Key池 (可用于新请求)
|
16 |
-
this.apiKeys = [];
|
17 |
-
|
18 |
this.initializeApiKeys();
|
19 |
this.initializeAuth();
|
20 |
-
|
21 |
-
// 已使用的API Key池
|
22 |
this.usedApiKeys = [];
|
23 |
|
24 |
-
// 失效的API Key池
|
25 |
this.invalidApiKeys = [];
|
26 |
|
27 |
// Gemini安全设置
|
@@ -82,24 +79,24 @@ class Config {
|
|
82 |
|
83 |
/**
|
84 |
* 获取可用的API Key(负载均衡)
|
85 |
-
* 只负责从主池获取一个key,不负责移动到used池
|
86 |
*/
|
87 |
-
|
88 |
if (this.apiKeys.length === 0) {
|
89 |
if (this.usedApiKeys.length > 0) {
|
90 |
-
// 如果主池为空,则从已使用池回收key
|
91 |
this.apiKeys.push(...this.usedApiKeys);
|
92 |
this.usedApiKeys = [];
|
93 |
-
console.log('🔄 回收已使用的API Keys到主池。');
|
94 |
} else {
|
95 |
-
return null;
|
96 |
}
|
97 |
}
|
98 |
-
|
|
|
|
|
|
|
99 |
}
|
100 |
|
101 |
/**
|
102 |
-
* 获取第一个可用的API Key
|
103 |
*/
|
104 |
getFirstAvailableApiKey() {
|
105 |
if (this.apiKeys.length > 0) {
|
@@ -115,36 +112,31 @@ class Config {
|
|
115 |
* 将API Key标记为失效
|
116 |
*/
|
117 |
markKeyAsInvalid(apiKey) {
|
118 |
-
|
119 |
-
|
120 |
-
|
|
|
121 |
|
122 |
-
|
123 |
-
if (
|
|
|
|
|
|
|
|
|
124 |
this.invalidApiKeys.push(apiKey);
|
125 |
}
|
126 |
|
127 |
-
console.warn(`⚠️ API Key 已标记为失效: ${apiKey
|
128 |
-
console.warn(`当前可用API Keys: ${this.apiKeys.length}, 已使用: ${this.usedApiKeys.length}, 失效: ${this.invalidApiKeys.length}`);
|
129 |
}
|
130 |
|
131 |
/**
|
132 |
-
* 将API Key
|
133 |
*/
|
134 |
-
|
135 |
-
if (
|
136 |
this.usedApiKeys.push(apiKey);
|
137 |
}
|
138 |
}
|
139 |
-
|
140 |
-
/**
|
141 |
-
* 将API Key返回到主池 (例如,遇到暂时性错误如429,放回队列末尾)
|
142 |
-
*/
|
143 |
-
returnApiKeyToMain(apiKey) {
|
144 |
-
if (apiKey && !this.apiKeys.includes(apiKey) && !this.invalidApiKeys.includes(apiKey)) {
|
145 |
-
this.apiKeys.push(apiKey);
|
146 |
-
}
|
147 |
-
}
|
148 |
|
149 |
/**
|
150 |
* 验证授权头
|
@@ -272,9 +264,7 @@ class MessageConverter {
|
|
272 |
|
273 |
// 角色转换
|
274 |
if (role === 'system') {
|
275 |
-
|
276 |
-
// For simplicity, we convert 'system' to 'user'.
|
277 |
-
role = 'user';
|
278 |
}
|
279 |
|
280 |
if (role === 'assistant') {
|
@@ -292,14 +282,10 @@ class MessageConverter {
|
|
292 |
parts = await this.convertContentArray(content);
|
293 |
} else {
|
294 |
// 其他格式,转为文本
|
295 |
-
console.warn(`未知的内容格式: ${typeof content},将转为文本处理`);
|
296 |
parts = [{ text: String(content) }];
|
297 |
}
|
298 |
|
299 |
-
//
|
300 |
-
// Gemini API expects alternating user/model turns (e.g., user, model, user, model).
|
301 |
-
// If OpenAI messages have consecutive 'user' or 'assistant' roles,
|
302 |
-
// we should merge them into a single Gemini turn.
|
303 |
if (role === currentRole) {
|
304 |
currentParts.push(...parts);
|
305 |
} else {
|
@@ -352,8 +338,8 @@ class MessageConverter {
|
|
352 |
}
|
353 |
} catch (error) {
|
354 |
console.error('转换内容项错误:', error);
|
355 |
-
//
|
356 |
-
|
357 |
}
|
358 |
}
|
359 |
|
@@ -437,7 +423,6 @@ class ModelManager {
|
|
437 |
|
438 |
const apiKey = this.config.getFirstAvailableApiKey();
|
439 |
if (!apiKey) {
|
440 |
-
console.log("无key")
|
441 |
return {
|
442 |
success: false,
|
443 |
error: '没有可用的API Key',
|
@@ -454,11 +439,6 @@ class ModelManager {
|
|
454 |
});
|
455 |
|
456 |
if (!response.ok) {
|
457 |
-
console.log("请求模型失败");
|
458 |
-
// 如果是 403 错误,标记 key 失效
|
459 |
-
if (response.status === 403) {
|
460 |
-
this.config.markKeyAsInvalid(apiKey);
|
461 |
-
}
|
462 |
return {
|
463 |
success: false,
|
464 |
error: `获取模型列表失败: ${response.status}`,
|
@@ -491,28 +471,23 @@ class ModelManager {
|
|
491 |
const allowedPrefixes = [
|
492 |
'models/gemini-2.5-flash',
|
493 |
'models/gemini-2.0-flash',
|
494 |
-
'models/gemini-1.5-flash'
|
495 |
-
'models/gemini-1.5-pro' // 允许 Gemini 1.5 Pro 模型
|
496 |
];
|
497 |
|
498 |
const excludedModels = [
|
499 |
-
'models/gemini-1.5-flash-8b'
|
500 |
];
|
501 |
|
502 |
const filteredModels = models.filter(model => {
|
503 |
const modelName = model.name;
|
504 |
|
505 |
-
// 排除特定模型
|
506 |
if (excludedModels.some(excluded => modelName.startsWith(excluded))) {
|
507 |
return false;
|
508 |
}
|
509 |
-
|
510 |
-
// 允许特定的完整模型名 (例如, "models/gemini-2.5-pro" 不以任何前缀开始)
|
511 |
-
if (modelName === "models/gemini-2.5-pro" || modelName === "models/gemini-1.5-pro") {
|
512 |
return true;
|
513 |
}
|
514 |
|
515 |
-
// 允许以特定前缀开头的模型
|
516 |
return allowedPrefixes.some(prefix => modelName.startsWith(prefix));
|
517 |
});
|
518 |
|
@@ -622,23 +597,6 @@ class ResponseConverter {
|
|
622 |
return `data: ${JSON.stringify(openaiChunk)}\n\n`;
|
623 |
}
|
624 |
}
|
625 |
-
// 处理安全设置阻断等情况
|
626 |
-
if (geminiData.promptFeedback?.blockReason || geminiData.candidates?.[0]?.finishReason === 'SAFETY') {
|
627 |
-
console.warn('Gemini stream blocked:', geminiData.promptFeedback?.blockReason || 'SAFETY');
|
628 |
-
const openaiChunk = {
|
629 |
-
id: requestId,
|
630 |
-
object: 'chat.completion.chunk',
|
631 |
-
created: Math.floor(Date.now() / 1000),
|
632 |
-
model: model,
|
633 |
-
choices: [{
|
634 |
-
index: 0,
|
635 |
-
delta: { content: `[Content blocked due to: ${geminiData.promptFeedback?.blockReason || 'SAFETY'}]` },
|
636 |
-
finish_reason: 'content_filter'
|
637 |
-
}]
|
638 |
-
};
|
639 |
-
return `data: ${JSON.stringify(openaiChunk)}\n\n`;
|
640 |
-
}
|
641 |
-
|
642 |
return '';
|
643 |
} catch (error) {
|
644 |
console.error('转换流响应块错误:', error);
|
@@ -673,30 +631,9 @@ class ResponseConverter {
|
|
673 |
role: 'assistant',
|
674 |
content: text
|
675 |
},
|
676 |
-
finish_reason: candidate.finishReason === 'STOP' ? 'stop' : 'length'
|
677 |
-
});
|
678 |
-
} else if (candidate.finishReason === 'SAFETY') {
|
679 |
-
// 如果有内容但被安全机制阻断
|
680 |
-
openaiResponse.choices.push({
|
681 |
-
index: 0,
|
682 |
-
message: {
|
683 |
-
role: 'assistant',
|
684 |
-
content: `Content generation was blocked due to safety policy.`
|
685 |
-
},
|
686 |
-
finish_reason: 'content_filter'
|
687 |
});
|
688 |
}
|
689 |
-
} else if (geminiResponse.promptFeedback && geminiResponse.promptFeedback.blockReason) {
|
690 |
-
// Handle content moderation blocks (even before candidate generation)
|
691 |
-
console.warn('Gemini response blocked:', geminiResponse.promptFeedback.blockReason);
|
692 |
-
openaiResponse.choices.push({
|
693 |
-
index: 0,
|
694 |
-
message: {
|
695 |
-
role: 'assistant',
|
696 |
-
content: `Content generation was blocked due to: ${geminiResponse.promptFeedback.blockReason}.`
|
697 |
-
},
|
698 |
-
finish_reason: 'content_filter'
|
699 |
-
});
|
700 |
}
|
701 |
|
702 |
// 尝试从usage信息中获取token使用量
|
@@ -712,24 +649,15 @@ class ResponseConverter {
|
|
712 |
}
|
713 |
|
714 |
/**
|
715 |
-
*
|
716 |
*/
|
717 |
-
static
|
718 |
-
|
719 |
-
|
720 |
-
id: requestId,
|
721 |
-
object: 'chat.completion.chunk',
|
722 |
-
created: Math.floor(Date.now() / 1000),
|
723 |
-
model: model,
|
724 |
-
choices: [{ index: 0, delta: { content: '' }, finish_reason: 'stop' }]
|
725 |
-
})}\n\n`;
|
726 |
-
return;
|
727 |
-
}
|
728 |
|
729 |
-
let
|
730 |
-
|
731 |
-
const
|
732 |
-
offset += chunkSize;
|
733 |
|
734 |
const openaiChunk = {
|
735 |
id: requestId,
|
@@ -739,25 +667,14 @@ class ResponseConverter {
|
|
739 |
choices: [{
|
740 |
index: 0,
|
741 |
delta: { content: chunk },
|
742 |
-
finish_reason:
|
743 |
}]
|
744 |
};
|
745 |
-
|
|
|
746 |
}
|
747 |
|
748 |
-
|
749 |
-
const finalChunk = {
|
750 |
-
id: requestId,
|
751 |
-
object: 'chat.completion.chunk',
|
752 |
-
created: Math.floor(Date.now() / 1000),
|
753 |
-
model: model,
|
754 |
-
choices: [{
|
755 |
-
index: 0,
|
756 |
-
delta: {}, // Delta for final chunk might be empty
|
757 |
-
finish_reason: 'stop' // Indicate end of stream
|
758 |
-
}]
|
759 |
-
};
|
760 |
-
yield `data: ${JSON.stringify(finalChunk)}\n\n`;
|
761 |
}
|
762 |
}
|
763 |
|
@@ -769,7 +686,7 @@ class GeminiRealtimeStreamParser {
|
|
769 |
this.response = response;
|
770 |
this.onChunk = onChunk;
|
771 |
this.buffer = '';
|
772 |
-
this.bufferLv = 0;
|
773 |
this.inString = false;
|
774 |
this.escapeNext = false;
|
775 |
this.decoder = new TextDecoder();
|
@@ -777,16 +694,11 @@ class GeminiRealtimeStreamParser {
|
|
777 |
|
778 |
async start() {
|
779 |
try {
|
780 |
-
const
|
781 |
-
|
782 |
-
const { done, value } = await reader.read();
|
783 |
-
if (done) break;
|
784 |
-
|
785 |
-
const text = this.decoder.decode(value, { stream: true });
|
786 |
await this.processText(text);
|
787 |
}
|
788 |
|
789 |
-
// Handle any remaining buffer after stream ends
|
790 |
await this.handleRemainingBuffer();
|
791 |
} catch (error) {
|
792 |
console.error('流式解析错误:', error);
|
@@ -796,24 +708,26 @@ class GeminiRealtimeStreamParser {
|
|
796 |
|
797 |
async processText(text) {
|
798 |
for (const char of text) {
|
799 |
-
// Handle escaped characters
|
800 |
if (this.escapeNext) {
|
801 |
-
this.
|
|
|
|
|
802 |
this.escapeNext = false;
|
803 |
continue;
|
804 |
}
|
805 |
|
806 |
-
|
807 |
-
if (char === '"') {
|
808 |
-
this.inString = !this.inString;
|
809 |
-
} else if (char === '\\' && this.inString) {
|
810 |
-
// Handle escape character itself within a string
|
811 |
this.escapeNext = true;
|
812 |
-
this.
|
|
|
|
|
813 |
continue;
|
814 |
}
|
815 |
|
816 |
-
|
|
|
|
|
|
|
817 |
if (!this.inString) {
|
818 |
if (char === '{' || char === '[') {
|
819 |
this.bufferLv++;
|
@@ -822,27 +736,33 @@ class GeminiRealtimeStreamParser {
|
|
822 |
}
|
823 |
}
|
824 |
|
825 |
-
this.
|
826 |
-
|
827 |
-
|
828 |
-
|
829 |
-
|
|
|
|
|
|
|
|
|
830 |
try {
|
831 |
const bufferJson = JSON.parse(this.buffer);
|
832 |
await this.onChunk(bufferJson);
|
833 |
} catch (parseError) {
|
834 |
console.error('解析Gemini流数据错误:', parseError);
|
835 |
-
// It's possible to get partial JSON or errors, log and clear buffer
|
836 |
}
|
837 |
-
|
|
|
838 |
}
|
839 |
}
|
840 |
}
|
841 |
|
842 |
async handleRemainingBuffer() {
|
843 |
-
if (this.buffer.trim()
|
844 |
try {
|
845 |
-
|
|
|
|
|
846 |
const bufferJson = JSON.parse(this.buffer);
|
847 |
await this.onChunk(bufferJson);
|
848 |
} catch (parseError) {
|
@@ -896,7 +816,7 @@ class ApiProxyService {
|
|
896 |
}
|
897 |
|
898 |
/**
|
899 |
-
* 处理聊天API
|
900 |
*/
|
901 |
async handleChatRequest(req, res) {
|
902 |
try {
|
@@ -920,7 +840,7 @@ class ApiProxyService {
|
|
920 |
const requestBody = this.requestBuilder.buildRequestBody(geminiMessages, params);
|
921 |
|
922 |
if (params.stream) {
|
923 |
-
const result = await this.
|
924 |
if (!result.success) {
|
925 |
res.status(result.status || 500).json({ error: result.error });
|
926 |
}
|
@@ -945,9 +865,7 @@ class ApiProxyService {
|
|
945 |
}
|
946 |
|
947 |
/**
|
948 |
-
*
|
949 |
-
* 如果客户端请求流式,则内部调用非流式 Gemini API,然后转为假流发送给客户端。
|
950 |
-
* 如果客户端请求非流式,则直接代理非流式请求。
|
951 |
*/
|
952 |
async handleFakeStreamChatRequest(req, res) {
|
953 |
try {
|
@@ -955,6 +873,7 @@ class ApiProxyService {
|
|
955 |
|
956 |
const params = MessageConverter.extractParams(req.body);
|
957 |
|
|
|
958 |
const geminiMessages = await MessageConverter.convertMessages(params.messages);
|
959 |
|
960 |
if (!geminiMessages || geminiMessages.length === 0) {
|
@@ -966,17 +885,17 @@ class ApiProxyService {
|
|
966 |
}
|
967 |
});
|
968 |
}
|
969 |
-
|
970 |
const requestBody = this.requestBuilder.buildRequestBody(geminiMessages, params);
|
971 |
|
972 |
if (params.stream) {
|
973 |
-
//
|
974 |
-
const result = await this.
|
975 |
if (!result.success) {
|
976 |
res.status(result.status || 500).json({ error: result.error });
|
977 |
}
|
978 |
} else {
|
979 |
-
//
|
980 |
const result = await this.executeNormalRequest(requestBody, params, requestId);
|
981 |
if (result.success) {
|
982 |
res.json(result.data);
|
@@ -997,12 +916,80 @@ class ApiProxyService {
|
|
997 |
}
|
998 |
|
999 |
/**
|
1000 |
-
*
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1001 |
*/
|
1002 |
-
async
|
1003 |
const maxRetries = 3;
|
1004 |
|
1005 |
-
|
1006 |
if (!apiKey) {
|
1007 |
return { success: false, error: '目前暂无可用的API Key', status: 503 };
|
1008 |
}
|
@@ -1019,40 +1006,30 @@ class ApiProxyService {
|
|
1019 |
if (response.status === 403) {
|
1020 |
this.config.markKeyAsInvalid(apiKey);
|
1021 |
if (retryCount < maxRetries) {
|
1022 |
-
|
1023 |
-
return await this.executeRealStreamRequest(requestBody, params, requestId, res, retryCount + 1);
|
1024 |
}
|
1025 |
-
return { success: false, error: 'API Key
|
1026 |
}
|
1027 |
|
1028 |
if (response.status === 429) {
|
1029 |
-
this.config.
|
1030 |
if (retryCount < maxRetries) {
|
1031 |
-
|
1032 |
-
return await this.executeRealStreamRequest(requestBody, params, requestId, res, retryCount + 1);
|
1033 |
}
|
1034 |
return { success: false, error: '请求频率过高,请稍后重试', status: 429 };
|
1035 |
}
|
1036 |
|
1037 |
-
if (response.status
|
1038 |
-
this.config.
|
1039 |
-
|
1040 |
-
console.log(`真实流式请求重试 ${retryCount + 1}/${maxRetries} (${response.status}错误)。`);
|
1041 |
-
return await this.executeRealStreamRequest(requestBody, params, requestId, res, retryCount + 1);
|
1042 |
-
}
|
1043 |
-
return { success: false, error: '目前服务器繁忙,请稍后重试', status: response.status };
|
1044 |
}
|
1045 |
|
1046 |
if (!response.ok) {
|
1047 |
const errorText = await response.text();
|
1048 |
console.error(`API请求失败: ${response.status}, 错误信息: ${errorText}`);
|
1049 |
-
|
1050 |
-
return { success: false, error: `API请求失败: ${response.status} - ${errorText.substring(0, 100)}`, status: response.status };
|
1051 |
}
|
1052 |
|
1053 |
-
// 如果请求成功,将 key 放入已使用池
|
1054 |
-
this.config.returnApiKeyToUsed(apiKey);
|
1055 |
-
|
1056 |
res.writeHead(200, {
|
1057 |
'Content-Type': 'text/event-stream',
|
1058 |
'Cache-Control': 'no-cache',
|
@@ -1075,11 +1052,10 @@ class ApiProxyService {
|
|
1075 |
|
1076 |
} catch (error) {
|
1077 |
console.error('执行流式请求错误:', error);
|
1078 |
-
this.config.
|
1079 |
|
1080 |
if (retryCount < maxRetries) {
|
1081 |
-
|
1082 |
-
return await this.executeRealStreamRequest(requestBody, params, requestId, res, retryCount + 1);
|
1083 |
}
|
1084 |
|
1085 |
return { success: false, error: '网络请求失败: ' + error.message, status: 500 };
|
@@ -1087,12 +1063,12 @@ class ApiProxyService {
|
|
1087 |
}
|
1088 |
|
1089 |
/**
|
1090 |
-
*
|
1091 |
*/
|
1092 |
async executeNormalRequest(requestBody, params, requestId, retryCount = 0) {
|
1093 |
const maxRetries = 3;
|
1094 |
|
1095 |
-
|
1096 |
if (!apiKey) {
|
1097 |
return { success: false, error: '目前暂无可用的API Key', status: 503 };
|
1098 |
}
|
@@ -1109,48 +1085,39 @@ class ApiProxyService {
|
|
1109 |
if (response.status === 403) {
|
1110 |
this.config.markKeyAsInvalid(apiKey);
|
1111 |
if (retryCount < maxRetries) {
|
1112 |
-
console.log(`非流式请求重试 ${retryCount + 1}/${maxRetries} (403错误)。`);
|
1113 |
return await this.executeNormalRequest(requestBody, params, requestId, retryCount + 1);
|
1114 |
}
|
1115 |
-
return { success: false, error: 'API Key
|
1116 |
}
|
1117 |
|
1118 |
if (response.status === 429) {
|
1119 |
-
this.config.
|
1120 |
if (retryCount < maxRetries) {
|
1121 |
-
console.log(`非流式请求重试 ${retryCount + 1}/${maxRetries} (429错误)。`);
|
1122 |
return await this.executeNormalRequest(requestBody, params, requestId, retryCount + 1);
|
1123 |
}
|
1124 |
return { success: false, error: '请求频率过高,请稍后重试', status: 429 };
|
1125 |
}
|
1126 |
|
1127 |
-
if (response.status
|
1128 |
-
this.config.
|
1129 |
-
|
1130 |
-
console.log(`非流式请求重试 ${retryCount + 1}/${maxRetries} (${response.status}错误)。`);
|
1131 |
-
return await this.executeNormalRequest(requestBody, params, requestId, retryCount + 1);
|
1132 |
-
}
|
1133 |
-
return { success: false, error: '目前服务器繁忙,请稍后重试', status: response.status };
|
1134 |
}
|
1135 |
|
1136 |
if (!response.ok) {
|
1137 |
const errorText = await response.text();
|
1138 |
console.error(`API请求失败: ${response.status}, 错误信息: ${errorText}`);
|
1139 |
-
|
1140 |
-
return { success: false, error: `API请求失败: ${response.status} - ${errorText.substring(0, 100)}`, status: response.status };
|
1141 |
}
|
1142 |
|
1143 |
-
this.config.returnApiKeyToUsed(apiKey);
|
1144 |
const geminiResponse = await response.json();
|
1145 |
const openaiResponse = ResponseConverter.convertNormalResponse(geminiResponse, requestId, params.model);
|
1146 |
return { success: true, data: openaiResponse };
|
1147 |
|
1148 |
} catch (error) {
|
1149 |
console.error('执行非流式请求错误:', error);
|
1150 |
-
this.config.
|
1151 |
|
1152 |
if (retryCount < maxRetries) {
|
1153 |
-
console.log(`非流式请求重试 ${retryCount + 1}/${maxRetries} (网络错误)。`);
|
1154 |
return await this.executeNormalRequest(requestBody, params, requestId, retryCount + 1);
|
1155 |
}
|
1156 |
|
@@ -1158,82 +1125,6 @@ class ApiProxyService {
|
|
1158 |
}
|
1159 |
}
|
1160 |
|
1161 |
-
/**
|
1162 |
-
* 执行假流式响应 (内部调用非流式 Gemini API, 然后转换为流式响应)
|
1163 |
-
*/
|
1164 |
-
async executeFakeStreamResponse(requestBody, params, requestId, res) {
|
1165 |
-
let pingInterval;
|
1166 |
-
const pingDelayMs = 1000; // 每1秒发送一次ping消息
|
1167 |
-
|
1168 |
-
try {
|
1169 |
-
res.writeHead(200, {
|
1170 |
-
'Content-Type': 'text/event-stream',
|
1171 |
-
'Cache-Control': 'no-cache',
|
1172 |
-
'Connection': 'keep-alive',
|
1173 |
-
'Access-Control-Allow-Origin': '*'
|
1174 |
-
});
|
1175 |
-
|
1176 |
-
// 开始发送ping消息
|
1177 |
-
pingInterval = setInterval(() => {
|
1178 |
-
const pingChunk = {
|
1179 |
-
id: requestId,
|
1180 |
-
object: 'chat.completion.chunk',
|
1181 |
-
created: Math.floor(Date.now() / 1000),
|
1182 |
-
model: params.model,
|
1183 |
-
choices: [{
|
1184 |
-
index: 0,
|
1185 |
-
delta: {}, // 空 delta 用于 ping
|
1186 |
-
finish_reason: null
|
1187 |
-
}]
|
1188 |
-
};
|
1189 |
-
// console.log('发送 ping 块...');
|
1190 |
-
res.write(`data: ${JSON.stringify(pingChunk)}\n\n`);
|
1191 |
-
}, pingDelayMs);
|
1192 |
-
|
1193 |
-
// 执行实际的非流式请求到 Gemini
|
1194 |
-
const result = await this.executeNormalRequest(requestBody, params, requestId);
|
1195 |
-
|
1196 |
-
// 请求完成后立即清除 ping 间隔
|
1197 |
-
clearInterval(pingInterval);
|
1198 |
-
pingInterval = null;
|
1199 |
-
|
1200 |
-
if (result.success) {
|
1201 |
-
const fullContent = result.data.choices[0]?.message?.content || '';
|
1202 |
-
|
1203 |
-
// 将完整内容分块并以假流形式发送
|
1204 |
-
for (const chunk of ResponseConverter.chunkTextForFakeStream(fullContent, requestId, params.model)) {
|
1205 |
-
res.write(chunk);
|
1206 |
-
// 可以添加一个小的延迟来模拟更真实的“流式”体验
|
1207 |
-
await new Promise(resolve => setTimeout(resolve, 50));
|
1208 |
-
}
|
1209 |
-
res.write('data: [DONE]\n\n');
|
1210 |
-
res.end();
|
1211 |
-
return { success: true };
|
1212 |
-
} else {
|
1213 |
-
// 如果非流式请求失败,发送错误块并结束流
|
1214 |
-
res.write(`data: ${JSON.stringify({ error: result.error })}\n\n`);
|
1215 |
-
res.write('data: [DONE]\n\n');
|
1216 |
-
res.end();
|
1217 |
-
return { success: false, error: result.error, status: result.status };
|
1218 |
-
}
|
1219 |
-
|
1220 |
-
} catch (error) {
|
1221 |
-
console.error('执行假流式请求错误:', error);
|
1222 |
-
if (pingInterval) {
|
1223 |
-
clearInterval(pingInterval);
|
1224 |
-
}
|
1225 |
-
// 如果在响应发送前或发送过程中出现错误
|
1226 |
-
res.status(500).json({
|
1227 |
-
error: {
|
1228 |
-
message: '内部服务器错误: ' + error.message,
|
1229 |
-
type: 'internal_server_error',
|
1230 |
-
code: 'server_error'
|
1231 |
-
}
|
1232 |
-
});
|
1233 |
-
return { success: false, error: '内部服务器错误: ' + error.message, status: 500 };
|
1234 |
-
}
|
1235 |
-
}
|
1236 |
-
|
1237 |
/**
|
1238 |
* 处理模型列表请求
|
1239 |
*/
|
@@ -1244,7 +1135,6 @@ class ApiProxyService {
|
|
1244 |
if (result.success) {
|
1245 |
res.json(result.data);
|
1246 |
} else {
|
1247 |
-
console.log("失败");
|
1248 |
res.status(result.status || 500).json({ error: result.error });
|
1249 |
}
|
1250 |
} catch (error) {
|
@@ -1292,35 +1182,36 @@ class Server {
|
|
1292 |
}
|
1293 |
|
1294 |
setupRoutes() {
|
1295 |
-
//
|
1296 |
this.app.post('/v1/chat/completions', (req, res) => {
|
1297 |
this.apiProxy.handleChatRequest(req, res);
|
1298 |
});
|
1299 |
|
1300 |
-
//
|
1301 |
this.app.post('/fakestream/v1/chat/completions', (req, res) => {
|
1302 |
this.apiProxy.handleFakeStreamChatRequest(req, res);
|
1303 |
});
|
1304 |
|
1305 |
-
//
|
1306 |
this.app.get('/v1/models', (req, res) => {
|
1307 |
this.apiProxy.handleModelsRequest(req, res);
|
1308 |
});
|
1309 |
-
|
|
|
1310 |
this.app.get('/fakestream/v1/models', (req, res) => {
|
1311 |
this.apiProxy.handleModelsRequest(req, res);
|
1312 |
-
});
|
|
|
1313 |
// 健康检查接口
|
1314 |
this.app.get('/health', (req, res) => {
|
1315 |
res.json({
|
1316 |
status: 'healthy',
|
1317 |
timestamp: new Date().toISOString(),
|
1318 |
-
availableKeys: this.apiProxy.config.apiKeys.length,
|
1319 |
-
usedKeys: this.apiProxy.config.usedApiKeys.length,
|
1320 |
-
invalidKeys: this.apiProxy.config.invalidApiKeys.length,
|
1321 |
-
|
1322 |
-
|
1323 |
-
features: ['text', 'vision', 'stream_real', 'stream_fake', 'load_balancing']
|
1324 |
});
|
1325 |
});
|
1326 |
|
@@ -1351,9 +1242,10 @@ class Server {
|
|
1351 |
start(port = 3000) {
|
1352 |
this.app.listen(port, () => {
|
1353 |
console.log(`🚀 OpenAI to Gemini Proxy Server (Enhanced) 启动在端口 ${port}`);
|
1354 |
-
console.log(`📍
|
1355 |
-
console.log(`📍 假流式聊天API
|
1356 |
console.log(`📋 模型列表: http://localhost:${port}/v1/models`);
|
|
|
1357 |
console.log(`🔍 健康检查: http://localhost:${port}/health`);
|
1358 |
});
|
1359 |
}
|
@@ -1362,4 +1254,4 @@ class Server {
|
|
1362 |
// 启动服务器
|
1363 |
const server = new Server();
|
1364 |
const port = process.env.PORT || 3000;
|
1365 |
-
server.start(port);
|
|
|
12 |
*/
|
13 |
class Config {
|
14 |
constructor() {
|
|
|
|
|
|
|
15 |
this.initializeApiKeys();
|
16 |
this.initializeAuth();
|
17 |
+
|
18 |
+
// 已使用的API Key池
|
19 |
this.usedApiKeys = [];
|
20 |
|
21 |
+
// 失效的API Key池
|
22 |
this.invalidApiKeys = [];
|
23 |
|
24 |
// Gemini安全设置
|
|
|
79 |
|
80 |
/**
|
81 |
* 获取可用的API Key(负载均衡)
|
|
|
82 |
*/
|
83 |
+
getApiKey() {
|
84 |
if (this.apiKeys.length === 0) {
|
85 |
if (this.usedApiKeys.length > 0) {
|
|
|
86 |
this.apiKeys.push(...this.usedApiKeys);
|
87 |
this.usedApiKeys = [];
|
|
|
88 |
} else {
|
89 |
+
return null;
|
90 |
}
|
91 |
}
|
92 |
+
|
93 |
+
const apiKey = this.apiKeys.shift();
|
94 |
+
this.usedApiKeys.push(apiKey);
|
95 |
+
return apiKey;
|
96 |
}
|
97 |
|
98 |
/**
|
99 |
+
* 获取第一个可用的API Key(用于模型列表请求)
|
100 |
*/
|
101 |
getFirstAvailableApiKey() {
|
102 |
if (this.apiKeys.length > 0) {
|
|
|
112 |
* 将API Key标记为失效
|
113 |
*/
|
114 |
markKeyAsInvalid(apiKey) {
|
115 |
+
const usedIndex = this.usedApiKeys.indexOf(apiKey);
|
116 |
+
if (usedIndex !== -1) {
|
117 |
+
this.usedApiKeys.splice(usedIndex, 1);
|
118 |
+
}
|
119 |
|
120 |
+
const mainIndex = this.apiKeys.indexOf(apiKey);
|
121 |
+
if (mainIndex !== -1) {
|
122 |
+
this.apiKeys.splice(mainIndex, 1);
|
123 |
+
}
|
124 |
+
|
125 |
+
if (!this.invalidApiKeys.includes(apiKey)) {
|
126 |
this.invalidApiKeys.push(apiKey);
|
127 |
}
|
128 |
|
129 |
+
console.warn(`⚠️ API Key 已标记为失效: ${apiKey.substring(0, 10)}...`);
|
|
|
130 |
}
|
131 |
|
132 |
/**
|
133 |
+
* 将API Key移回已使用池
|
134 |
*/
|
135 |
+
moveToUsed(apiKey) {
|
136 |
+
if (!this.usedApiKeys.includes(apiKey)) {
|
137 |
this.usedApiKeys.push(apiKey);
|
138 |
}
|
139 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
140 |
|
141 |
/**
|
142 |
* 验证授权头
|
|
|
264 |
|
265 |
// 角色转换
|
266 |
if (role === 'system') {
|
267 |
+
role = 'user';
|
|
|
|
|
268 |
}
|
269 |
|
270 |
if (role === 'assistant') {
|
|
|
282 |
parts = await this.convertContentArray(content);
|
283 |
} else {
|
284 |
// 其他格式,转为文本
|
|
|
285 |
parts = [{ text: String(content) }];
|
286 |
}
|
287 |
|
288 |
+
// 合并相同角色的连续消息
|
|
|
|
|
|
|
289 |
if (role === currentRole) {
|
290 |
currentParts.push(...parts);
|
291 |
} else {
|
|
|
338 |
}
|
339 |
} catch (error) {
|
340 |
console.error('转换内容项错误:', error);
|
341 |
+
// 出错时跳过该项,避免整个请求失败
|
342 |
+
continue;
|
343 |
}
|
344 |
}
|
345 |
|
|
|
423 |
|
424 |
const apiKey = this.config.getFirstAvailableApiKey();
|
425 |
if (!apiKey) {
|
|
|
426 |
return {
|
427 |
success: false,
|
428 |
error: '没有可用的API Key',
|
|
|
439 |
});
|
440 |
|
441 |
if (!response.ok) {
|
|
|
|
|
|
|
|
|
|
|
442 |
return {
|
443 |
success: false,
|
444 |
error: `获取模型列表失败: ${response.status}`,
|
|
|
471 |
const allowedPrefixes = [
|
472 |
'models/gemini-2.5-flash',
|
473 |
'models/gemini-2.0-flash',
|
474 |
+
'models/gemini-1.5-flash'
|
|
|
475 |
];
|
476 |
|
477 |
const excludedModels = [
|
478 |
+
'models/gemini-1.5-flash-8b'
|
479 |
];
|
480 |
|
481 |
const filteredModels = models.filter(model => {
|
482 |
const modelName = model.name;
|
483 |
|
|
|
484 |
if (excludedModels.some(excluded => modelName.startsWith(excluded))) {
|
485 |
return false;
|
486 |
}
|
487 |
+
if(modelName == "models/gemini-2.5-pro"){
|
|
|
|
|
488 |
return true;
|
489 |
}
|
490 |
|
|
|
491 |
return allowedPrefixes.some(prefix => modelName.startsWith(prefix));
|
492 |
});
|
493 |
|
|
|
597 |
return `data: ${JSON.stringify(openaiChunk)}\n\n`;
|
598 |
}
|
599 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
600 |
return '';
|
601 |
} catch (error) {
|
602 |
console.error('转换流响应块错误:', error);
|
|
|
631 |
role: 'assistant',
|
632 |
content: text
|
633 |
},
|
634 |
+
finish_reason: candidate.finishReason === 'STOP' ? 'stop' : 'length'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
635 |
});
|
636 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
637 |
}
|
638 |
|
639 |
// 尝试从usage信息中获取token使用量
|
|
|
649 |
}
|
650 |
|
651 |
/**
|
652 |
+
* 将文本拆分为假流式块
|
653 |
*/
|
654 |
+
static splitTextToFakeStream(text, requestId, model) {
|
655 |
+
const chunks = [];
|
656 |
+
const chunkSize = 3; // 每个块包含的字符数
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
657 |
|
658 |
+
for (let i = 0; i < text.length; i += chunkSize) {
|
659 |
+
const chunk = text.slice(i, i + chunkSize);
|
660 |
+
const isLast = i + chunkSize >= text.length;
|
|
|
661 |
|
662 |
const openaiChunk = {
|
663 |
id: requestId,
|
|
|
667 |
choices: [{
|
668 |
index: 0,
|
669 |
delta: { content: chunk },
|
670 |
+
finish_reason: isLast ? 'stop' : null
|
671 |
}]
|
672 |
};
|
673 |
+
|
674 |
+
chunks.push(`data: ${JSON.stringify(openaiChunk)}\n\n`);
|
675 |
}
|
676 |
|
677 |
+
return chunks;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
678 |
}
|
679 |
}
|
680 |
|
|
|
686 |
this.response = response;
|
687 |
this.onChunk = onChunk;
|
688 |
this.buffer = '';
|
689 |
+
this.bufferLv = 0;
|
690 |
this.inString = false;
|
691 |
this.escapeNext = false;
|
692 |
this.decoder = new TextDecoder();
|
|
|
694 |
|
695 |
async start() {
|
696 |
try {
|
697 |
+
for await (const chunk of this.response.body) {
|
698 |
+
const text = this.decoder.decode(chunk, { stream: true });
|
|
|
|
|
|
|
|
|
699 |
await this.processText(text);
|
700 |
}
|
701 |
|
|
|
702 |
await this.handleRemainingBuffer();
|
703 |
} catch (error) {
|
704 |
console.error('流式解析错误:', error);
|
|
|
708 |
|
709 |
async processText(text) {
|
710 |
for (const char of text) {
|
|
|
711 |
if (this.escapeNext) {
|
712 |
+
if (this.bufferLv > 1) {
|
713 |
+
this.buffer += char;
|
714 |
+
}
|
715 |
this.escapeNext = false;
|
716 |
continue;
|
717 |
}
|
718 |
|
719 |
+
if (char === '\\' && this.inString) {
|
|
|
|
|
|
|
|
|
720 |
this.escapeNext = true;
|
721 |
+
if (this.bufferLv > 1) {
|
722 |
+
this.buffer += char;
|
723 |
+
}
|
724 |
continue;
|
725 |
}
|
726 |
|
727 |
+
if (char === '"') {
|
728 |
+
this.inString = !this.inString;
|
729 |
+
}
|
730 |
+
|
731 |
if (!this.inString) {
|
732 |
if (char === '{' || char === '[') {
|
733 |
this.bufferLv++;
|
|
|
736 |
}
|
737 |
}
|
738 |
|
739 |
+
if (this.bufferLv > 1) {
|
740 |
+
if (this.inString && char === '\n') {
|
741 |
+
this.buffer += '\\n';
|
742 |
+
} else {
|
743 |
+
this.buffer += char;
|
744 |
+
}
|
745 |
+
} else if (this.bufferLv === 1 && this.buffer) {
|
746 |
+
this.buffer += '}';
|
747 |
+
|
748 |
try {
|
749 |
const bufferJson = JSON.parse(this.buffer);
|
750 |
await this.onChunk(bufferJson);
|
751 |
} catch (parseError) {
|
752 |
console.error('解析Gemini流数据错误:', parseError);
|
|
|
753 |
}
|
754 |
+
|
755 |
+
this.buffer = '';
|
756 |
}
|
757 |
}
|
758 |
}
|
759 |
|
760 |
async handleRemainingBuffer() {
|
761 |
+
if (this.buffer.trim() && this.bufferLv >= 1) {
|
762 |
try {
|
763 |
+
if (!this.buffer.endsWith('}')) {
|
764 |
+
this.buffer += '}';
|
765 |
+
}
|
766 |
const bufferJson = JSON.parse(this.buffer);
|
767 |
await this.onChunk(bufferJson);
|
768 |
} catch (parseError) {
|
|
|
816 |
}
|
817 |
|
818 |
/**
|
819 |
+
* 处理聊天API请求(支持图片)
|
820 |
*/
|
821 |
async handleChatRequest(req, res) {
|
822 |
try {
|
|
|
840 |
const requestBody = this.requestBuilder.buildRequestBody(geminiMessages, params);
|
841 |
|
842 |
if (params.stream) {
|
843 |
+
const result = await this.handleStreamRequest(requestBody, params, requestId, res);
|
844 |
if (!result.success) {
|
845 |
res.status(result.status || 500).json({ error: result.error });
|
846 |
}
|
|
|
865 |
}
|
866 |
|
867 |
/**
|
868 |
+
* 处理假流式聊天API请求
|
|
|
|
|
869 |
*/
|
870 |
async handleFakeStreamChatRequest(req, res) {
|
871 |
try {
|
|
|
873 |
|
874 |
const params = MessageConverter.extractParams(req.body);
|
875 |
|
876 |
+
// 异步转换消息(支持图片处理)
|
877 |
const geminiMessages = await MessageConverter.convertMessages(params.messages);
|
878 |
|
879 |
if (!geminiMessages || geminiMessages.length === 0) {
|
|
|
885 |
}
|
886 |
});
|
887 |
}
|
888 |
+
|
889 |
const requestBody = this.requestBuilder.buildRequestBody(geminiMessages, params);
|
890 |
|
891 |
if (params.stream) {
|
892 |
+
// 假流式处理:使用非流式请求,然后模拟流式响应
|
893 |
+
const result = await this.handleFakeStreamRequest(requestBody, params, requestId, res);
|
894 |
if (!result.success) {
|
895 |
res.status(result.status || 500).json({ error: result.error });
|
896 |
}
|
897 |
} else {
|
898 |
+
// 非流式请求和原来一样
|
899 |
const result = await this.executeNormalRequest(requestBody, params, requestId);
|
900 |
if (result.success) {
|
901 |
res.json(result.data);
|
|
|
916 |
}
|
917 |
|
918 |
/**
|
919 |
+
* 处理假流式请求
|
920 |
+
*/
|
921 |
+
async handleFakeStreamRequest(requestBody, params, requestId, res) {
|
922 |
+
try {
|
923 |
+
// 设置流式响应头
|
924 |
+
res.writeHead(200, {
|
925 |
+
'Content-Type': 'text/event-stream',
|
926 |
+
'Cache-Control': 'no-cache',
|
927 |
+
'Connection': 'keep-alive',
|
928 |
+
'Access-Control-Allow-Origin': '*'
|
929 |
+
});
|
930 |
+
|
931 |
+
// 开始发送ping消息保持连接活跃
|
932 |
+
const pingInterval = setInterval(() => {
|
933 |
+
try {
|
934 |
+
res.write(': ping\n\n');
|
935 |
+
} catch (error) {
|
936 |
+
clearInterval(pingInterval);
|
937 |
+
}
|
938 |
+
}, 1000); // 每秒发送一次ping
|
939 |
+
|
940 |
+
// 执行非流式请求
|
941 |
+
const result = await this.executeNormalRequest(requestBody, params, requestId);
|
942 |
+
|
943 |
+
// 停止ping
|
944 |
+
clearInterval(pingInterval);
|
945 |
+
|
946 |
+
if (!result.success) {
|
947 |
+
res.write(`data: ${JSON.stringify({ error: result.error })}\n\n`);
|
948 |
+
res.write('data: [DONE]\n\n');
|
949 |
+
res.end();
|
950 |
+
return { success: false };
|
951 |
+
}
|
952 |
+
|
953 |
+
// 获取响应文本
|
954 |
+
const responseText = result.data.choices[0]?.message?.content || '';
|
955 |
+
|
956 |
+
if (responseText) {
|
957 |
+
// 将文本拆分为假流式块
|
958 |
+
const chunks = ResponseConverter.splitTextToFakeStream(responseText, requestId, params.model);
|
959 |
+
|
960 |
+
// 逐步发送块,模拟流式响应
|
961 |
+
for (const chunk of chunks) {
|
962 |
+
res.write(chunk);
|
963 |
+
// 添加小延迟以模拟真实的流式响应
|
964 |
+
await new Promise(resolve => setTimeout(resolve, 50));
|
965 |
+
}
|
966 |
+
}
|
967 |
+
|
968 |
+
res.write('data: [DONE]\n\n');
|
969 |
+
res.end();
|
970 |
+
|
971 |
+
return { success: true };
|
972 |
+
|
973 |
+
} catch (error) {
|
974 |
+
console.error('处理假流式请求错误:', error);
|
975 |
+
try {
|
976 |
+
res.write(`data: ${JSON.stringify({ error: '内部服务器错误: ' + error.message })}\n\n`);
|
977 |
+
res.write('data: [DONE]\n\n');
|
978 |
+
res.end();
|
979 |
+
} catch (writeError) {
|
980 |
+
console.error('写入错误响应失败:', writeError);
|
981 |
+
}
|
982 |
+
return { success: false, error: error.message };
|
983 |
+
}
|
984 |
+
}
|
985 |
+
|
986 |
+
/**
|
987 |
+
* 处理流式请求
|
988 |
*/
|
989 |
+
async handleStreamRequest(requestBody, params, requestId, res, retryCount = 0) {
|
990 |
const maxRetries = 3;
|
991 |
|
992 |
+
const apiKey = this.config.getApiKey();
|
993 |
if (!apiKey) {
|
994 |
return { success: false, error: '目前暂无可用的API Key', status: 503 };
|
995 |
}
|
|
|
1006 |
if (response.status === 403) {
|
1007 |
this.config.markKeyAsInvalid(apiKey);
|
1008 |
if (retryCount < maxRetries) {
|
1009 |
+
return await this.handleStreamRequest(requestBody, params, requestId, res, retryCount + 1);
|
|
|
1010 |
}
|
1011 |
+
return { success: false, error: 'API Key 无效', status: 403 };
|
1012 |
}
|
1013 |
|
1014 |
if (response.status === 429) {
|
1015 |
+
this.config.moveToUsed(apiKey);
|
1016 |
if (retryCount < maxRetries) {
|
1017 |
+
return await this.handleStreamRequest(requestBody, params, requestId, res, retryCount + 1);
|
|
|
1018 |
}
|
1019 |
return { success: false, error: '请求频率过高,请稍后重试', status: 429 };
|
1020 |
}
|
1021 |
|
1022 |
+
if (response.status === 500) {
|
1023 |
+
this.config.moveToUsed(apiKey);
|
1024 |
+
return { success: false, error: '目前服务器繁忙,请稍后重试', status: 500 };
|
|
|
|
|
|
|
|
|
1025 |
}
|
1026 |
|
1027 |
if (!response.ok) {
|
1028 |
const errorText = await response.text();
|
1029 |
console.error(`API请求失败: ${response.status}, 错误信息: ${errorText}`);
|
1030 |
+
return { success: false, error: `API请求失败: ${response.status}`, status: response.status };
|
|
|
1031 |
}
|
1032 |
|
|
|
|
|
|
|
1033 |
res.writeHead(200, {
|
1034 |
'Content-Type': 'text/event-stream',
|
1035 |
'Cache-Control': 'no-cache',
|
|
|
1052 |
|
1053 |
} catch (error) {
|
1054 |
console.error('执行流式请求错误:', error);
|
1055 |
+
this.config.moveToUsed(apiKey);
|
1056 |
|
1057 |
if (retryCount < maxRetries) {
|
1058 |
+
return await this.handleStreamRequest(requestBody, params, requestId, res, retryCount + 1);
|
|
|
1059 |
}
|
1060 |
|
1061 |
return { success: false, error: '网络请求失败: ' + error.message, status: 500 };
|
|
|
1063 |
}
|
1064 |
|
1065 |
/**
|
1066 |
+
* 处理非流式请求
|
1067 |
*/
|
1068 |
async executeNormalRequest(requestBody, params, requestId, retryCount = 0) {
|
1069 |
const maxRetries = 3;
|
1070 |
|
1071 |
+
const apiKey = this.config.getApiKey();
|
1072 |
if (!apiKey) {
|
1073 |
return { success: false, error: '目前暂无可用的API Key', status: 503 };
|
1074 |
}
|
|
|
1085 |
if (response.status === 403) {
|
1086 |
this.config.markKeyAsInvalid(apiKey);
|
1087 |
if (retryCount < maxRetries) {
|
|
|
1088 |
return await this.executeNormalRequest(requestBody, params, requestId, retryCount + 1);
|
1089 |
}
|
1090 |
+
return { success: false, error: 'API Key 无效', status: 403 };
|
1091 |
}
|
1092 |
|
1093 |
if (response.status === 429) {
|
1094 |
+
this.config.moveToUsed(apiKey);
|
1095 |
if (retryCount < maxRetries) {
|
|
|
1096 |
return await this.executeNormalRequest(requestBody, params, requestId, retryCount + 1);
|
1097 |
}
|
1098 |
return { success: false, error: '请求频率过高,请稍后重试', status: 429 };
|
1099 |
}
|
1100 |
|
1101 |
+
if (response.status === 500) {
|
1102 |
+
this.config.moveToUsed(apiKey);
|
1103 |
+
return { success: false, error: '目前服务器繁忙,请稍后重试', status: 500 };
|
|
|
|
|
|
|
|
|
1104 |
}
|
1105 |
|
1106 |
if (!response.ok) {
|
1107 |
const errorText = await response.text();
|
1108 |
console.error(`API请求失败: ${response.status}, 错误信息: ${errorText}`);
|
1109 |
+
return { success: false, error: `API请求失败: ${response.status}`, status: response.status };
|
|
|
1110 |
}
|
1111 |
|
|
|
1112 |
const geminiResponse = await response.json();
|
1113 |
const openaiResponse = ResponseConverter.convertNormalResponse(geminiResponse, requestId, params.model);
|
1114 |
return { success: true, data: openaiResponse };
|
1115 |
|
1116 |
} catch (error) {
|
1117 |
console.error('执行非流式请求错误:', error);
|
1118 |
+
this.config.moveToUsed(apiKey);
|
1119 |
|
1120 |
if (retryCount < maxRetries) {
|
|
|
1121 |
return await this.executeNormalRequest(requestBody, params, requestId, retryCount + 1);
|
1122 |
}
|
1123 |
|
|
|
1125 |
}
|
1126 |
}
|
1127 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1128 |
/**
|
1129 |
* 处理模型列表请求
|
1130 |
*/
|
|
|
1135 |
if (result.success) {
|
1136 |
res.json(result.data);
|
1137 |
} else {
|
|
|
1138 |
res.status(result.status || 500).json({ error: result.error });
|
1139 |
}
|
1140 |
} catch (error) {
|
|
|
1182 |
}
|
1183 |
|
1184 |
setupRoutes() {
|
1185 |
+
// 原始聊天接口(支持图片)
|
1186 |
this.app.post('/v1/chat/completions', (req, res) => {
|
1187 |
this.apiProxy.handleChatRequest(req, res);
|
1188 |
});
|
1189 |
|
1190 |
+
// 假流式聊天接口
|
1191 |
this.app.post('/fakestream/v1/chat/completions', (req, res) => {
|
1192 |
this.apiProxy.handleFakeStreamChatRequest(req, res);
|
1193 |
});
|
1194 |
|
1195 |
+
// 原始模型列表接口
|
1196 |
this.app.get('/v1/models', (req, res) => {
|
1197 |
this.apiProxy.handleModelsRequest(req, res);
|
1198 |
});
|
1199 |
+
|
1200 |
+
// 假流式模型列表接口(逻辑相同)
|
1201 |
this.app.get('/fakestream/v1/models', (req, res) => {
|
1202 |
this.apiProxy.handleModelsRequest(req, res);
|
1203 |
+
});
|
1204 |
+
|
1205 |
// 健康检查接口
|
1206 |
this.app.get('/health', (req, res) => {
|
1207 |
res.json({
|
1208 |
status: 'healthy',
|
1209 |
timestamp: new Date().toISOString(),
|
1210 |
+
availableKeys: this.apiProxy.config.apiKeys.length,
|
1211 |
+
usedKeys: this.apiProxy.config.usedApiKeys.length,
|
1212 |
+
invalidKeys: this.apiProxy.config.invalidApiKeys.length,
|
1213 |
+
version: '2.0.0',
|
1214 |
+
features: ['text', 'vision', 'stream', 'fake_stream', 'load_balancing']
|
|
|
1215 |
});
|
1216 |
});
|
1217 |
|
|
|
1242 |
start(port = 3000) {
|
1243 |
this.app.listen(port, () => {
|
1244 |
console.log(`🚀 OpenAI to Gemini Proxy Server (Enhanced) 启动在端口 ${port}`);
|
1245 |
+
console.log(`📍 聊天API: http://localhost:${port}/v1/chat/completions`);
|
1246 |
+
console.log(`📍 假流式聊天API: http://localhost:${port}/fakestream/v1/chat/completions`);
|
1247 |
console.log(`📋 模型列表: http://localhost:${port}/v1/models`);
|
1248 |
+
console.log(`📋 假流式模型列表: http://localhost:${port}/fakestream/v1/models`);
|
1249 |
console.log(`🔍 健康检查: http://localhost:${port}/health`);
|
1250 |
});
|
1251 |
}
|
|
|
// Start the server.
const server = new Server();
// PORT arrives from the environment as a string (or undefined). Parse it
// with an explicit radix and fall back to 3000 when it is unset or not a
// valid number, instead of handing a raw/garbage string to app.listen().
const port = Number.parseInt(process.env.PORT ?? '', 10) || 3000;
server.start(port);