Update api/index.js
Browse files- api/index.js +95 -45
api/index.js
CHANGED
@@ -59,11 +59,69 @@ const withAuth = (request) => {
|
|
59 |
}
|
60 |
}
|
61 |
};
|
|
|
62 |
// 返回运行信息
|
63 |
const logger = (res, req) => {
|
64 |
console.log(req.method, res.status, req.url, Date.now() - req.start, 'ms');
|
65 |
};
|
66 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
67 |
const router = AutoRouter({
|
68 |
before: [preflight], // 只保留 CORS preflight 检查
|
69 |
missing: () => error(404, '404 not found.'),
|
@@ -74,14 +132,14 @@ const router = AutoRouter({
|
|
74 |
router.get('/', () => json({
|
75 |
service: "AI Chat Completion Proxy",
|
76 |
usage: {
|
77 |
-
endpoint: "/v1/chat/completions",
|
78 |
method: "POST",
|
79 |
headers: {
|
80 |
"Content-Type": "application/json",
|
81 |
"Authorization": "Bearer YOUR_API_KEY"
|
82 |
},
|
83 |
body: {
|
84 |
-
model: "One of:
|
85 |
messages: [
|
86 |
{ role: "system", content: "You are a helpful assistant." },
|
87 |
{ role: "user", content: "Hello, who are you?" }
|
@@ -91,43 +149,29 @@ router.get('/', () => json({
|
|
91 |
top_p: 1
|
92 |
}
|
93 |
},
|
94 |
-
availableModels: [
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
"gpt-3.5-turbo",
|
100 |
-
"claude-3-sonnet@20240229",
|
101 |
-
"claude-3-opus@20240229",
|
102 |
-
"claude-3-haiku@20240307",
|
103 |
-
"claude-3-5-sonnet@20240620",
|
104 |
-
"gemini-1.5-flash",
|
105 |
-
"gemini-1.5-pro",
|
106 |
-
"chat-bison",
|
107 |
-
"codechat-bison"
|
108 |
-
],
|
109 |
note: "Replace YOUR_API_KEY with your actual API key."
|
110 |
}));
|
111 |
|
112 |
// models 路由
|
113 |
router.get(config.API_PREFIX + '/v1/models', withAuth, () =>
|
114 |
json({
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
{ id: "gemini-1.5-pro", object: "model", owned_by: "pieces-os" },
|
128 |
-
{ id: "chat-bison", object: "model", owned_by: "pieces-os" },
|
129 |
-
{ id: "codechat-bison", object: "model", owned_by: "pieces-os" },
|
130 |
-
],
|
131 |
})
|
132 |
);
|
133 |
|
@@ -307,19 +351,25 @@ function ChatCompletionStreamWithModel(text, model) {
|
|
307 |
}
|
308 |
|
309 |
async function handleCompletion(request) {
|
310 |
-
|
311 |
-
|
312 |
-
|
313 |
-
|
314 |
-
|
315 |
-
|
316 |
-
|
317 |
-
console.log(rules,content)
|
318 |
-
// 响应码,回复的消息
|
319 |
-
return await GrpcToPieces(inputModel, content, rules, stream, temperature, top_p);
|
320 |
-
} catch (err) {
|
321 |
-
return error(500, err.message);
|
322 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
323 |
}
|
324 |
|
325 |
(async () => {
|
|
|
59 |
}
|
60 |
}
|
61 |
};
|
62 |
+
|
63 |
// Request logger: prints method, response status, URL and elapsed time in ms.
// Expects `req.start` to have been stamped (Date.now()) earlier in the chain.
const logger = (res, req) => {
  const elapsedMs = Date.now() - req.start;
  console.log(req.method, res.status, req.url, elapsedMs, 'ms');
};
|
67 |
|
68 |
+
// Model registry: public model id -> upstream provider plus the
// provider-side model name ("mapping") used when forwarding the request.
const MODEL_INFO = {
  "claude-3-sonnet-20240229":   { provider: "anthropic", mapping: "claude-3-sonnet@20240229" },
  "claude-3-opus-20240229":     { provider: "anthropic", mapping: "claude-3-opus@20240229" },
  "claude-3-haiku-20240307":    { provider: "anthropic", mapping: "claude-3-haiku@20240307" },
  "claude-3-5-sonnet-20240620": { provider: "anthropic", mapping: "claude-3-5-sonnet@20240620" },
  "gpt-4o-mini":                { provider: "openai",    mapping: "gpt-4o-mini" },
  "gpt-4o":                     { provider: "openai",    mapping: "gpt-4o" },
  "gpt-4-turbo":                { provider: "openai",    mapping: "gpt-4-turbo" },
  "gpt-4":                      { provider: "openai",    mapping: "gpt-4" },
  "gpt-3.5-turbo":              { provider: "openai",    mapping: "gpt-3.5-turbo" },
  "gemini-1.5-pro":             { provider: "google",    mapping: "gemini-1.5-pro" },
  "gemini-1.5-flash":           { provider: "google",    mapping: "gemini-1.5-flash" },
  "chat-bison":                 { provider: "pieces-os", mapping: "chat-bison" },
  "codechat-bison":             { provider: "pieces-os", mapping: "codechat-bison" }
};
|
123 |
+
|
124 |
+
// 定义路由
|
125 |
const router = AutoRouter({
|
126 |
before: [preflight], // 只保留 CORS preflight 检查
|
127 |
missing: () => error(404, '404 not found.'),
|
|
|
132 |
router.get('/', () => json({
|
133 |
service: "AI Chat Completion Proxy",
|
134 |
usage: {
|
135 |
+
endpoint: "/v1/chat/completions",
|
136 |
method: "POST",
|
137 |
headers: {
|
138 |
"Content-Type": "application/json",
|
139 |
"Authorization": "Bearer YOUR_API_KEY"
|
140 |
},
|
141 |
body: {
|
142 |
+
model: "One of: " + Object.keys(MODEL_INFO).join(", "),
|
143 |
messages: [
|
144 |
{ role: "system", content: "You are a helpful assistant." },
|
145 |
{ role: "user", content: "Hello, who are you?" }
|
|
|
149 |
top_p: 1
|
150 |
}
|
151 |
},
|
152 |
+
availableModels: Object.entries(MODEL_INFO).map(([modelId, info]) => ({
|
153 |
+
id: modelId,
|
154 |
+
provider: info.provider,
|
155 |
+
mapping: info.mapping
|
156 |
+
})),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
157 |
note: "Replace YOUR_API_KEY with your actual API key."
|
158 |
}));
|
159 |
|
160 |
// Models route — OpenAI-compatible model listing derived from MODEL_INFO.
// `mapping`/`provider` are non-standard extras exposed for this proxy.
router.get(config.API_PREFIX + '/v1/models', withAuth, () =>
  json({
    object: "list",
    data: Object.entries(MODEL_INFO).map(([modelId, info]) => ({
      id: modelId,
      object: "model",
      // OpenAI reports `created` as a Unix timestamp in SECONDS;
      // Date.now() alone would emit milliseconds and confuse clients.
      created: Math.floor(Date.now() / 1000),
      owned_by: "pieces-os",
      permission: [],
      root: modelId,
      parent: null,
      mapping: info.mapping,
      provider: info.provider
    }))
  })
);
|
177 |
|
|
|
351 |
}
|
352 |
|
353 |
// Handle POST /v1/chat/completions: resolve the public model id through
// MODEL_INFO, split messages into system rules vs. conversation content,
// then forward to the gRPC backend.
// Returns an itty-router error() response: 400 for bad requests
// (unknown model, missing/empty messages), 500 for unexpected failures.
async function handleCompletion(request) {
  try {
    const { model: inputModel, messages, stream, temperature, top_p } = await request.json();

    // Reject unknown models before doing any further work.
    const modelInfo = MODEL_INFO[inputModel];
    if (!modelInfo) {
      return error(400, `Unsupported model: ${inputModel}`);
    }

    // A request without a usable messages array can never succeed upstream;
    // fail fast with a client error instead of a confusing 500.
    if (!Array.isArray(messages) || messages.length === 0) {
      return error(400, 'messages must be a non-empty array.');
    }

    const mappedModel = modelInfo.mapping;

    // Split system prompts (rules) from the user/assistant conversation.
    const { rules, message: content } = await messagesProcess(messages);

    // Forward using the mapped upstream model name.
    return await GrpcToPieces(mappedModel, content, rules, stream, temperature, top_p);
  } catch (err) {
    // Non-Error throws (strings, plain objects) have no .message — stringify
    // them so the 500 response always carries a diagnostic.
    return error(500, err instanceof Error ? err.message : String(err));
  }
}
|
374 |
|
375 |
(async () => {
|