Update main.ts
main.ts
CHANGED
@@ -1,799 +1,352 @@
- import { serve } from "https://deno.land/std@0.
-
- //
- const
-
- const
-
- interface SophNetModel {
-   id: number;
-   serviceUuid: string | null;
-   projectUuid: string;
-   displayName: string;
-   modelFamily: string;
-   available: boolean;
-   isBaseModel: boolean;
-   features: any;
-   supportedStream: boolean;
-   supportedImageInputs: boolean;
-   schema: Array<{
-     name: string;
-     displayName: string;
-     des: string;
-     type: string;
-     range: number[];
-     defaultValue: number;
-     required: boolean;
-   }>;
- }
-
- interface ModelsResponse {
-   status: number;
-   message: string;
-   result: SophNetModel[];
-   timestamp: number;
- }
-
- interface TokenInfo {
-   token: string;
-   expires: string;
- }
-
- interface Message {
-   role: string;
-   content: string;
- }
-
- interface Reference {
-   content: string;
-   id: string;
-   index: number;
-   title: string;
-   type: string;
-   url: string;
- }
-
- // Generate a random user-agent string
- function getRandomUserAgent(): string {
-   const userAgents = [
-     "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36",
-     "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.1 Safari/605.1.15",
-     "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.2151.44",
-     "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
-     "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
-     "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
-     "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/119.0",
-     "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:109.0) Gecko/20100101 Firefox/119.0",
-     "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
-   ];
-   return userAgents[Math.floor(Math.random() * userAgents.length)];
- }
-
- // Compute the exponential backoff delay
- function getExponentialBackoffDelay(retryCount: number): number {
-   const delay = INITIAL_RETRY_DELAY_MS * Math.pow(2, retryCount);
-   const jitter = Math.random() * INITIAL_RETRY_DELAY_MS; // add random jitter
-   return Math.min(delay + jitter, MAX_RETRY_DELAY_MS);
- }
-
- // Sleep helper
- function sleep(ms: number): Promise<void> {
-   return new Promise(resolve => setTimeout(resolve, ms));
- }
-
- // Get the token from KV
- async function getTokenFromKV(): Promise<TokenInfo | null> {
-   const tokenEntry = await kv.get<TokenInfo>([TOKEN_KEY]);
-   return tokenEntry.value;
- }
-
- // Store the token in KV
- async function storeTokenToKV(token: string, expires: string): Promise<void> {
-   await kv.set([TOKEN_KEY], { token, expires });
- }
-
- // Get an anonymous token
- async function getAnonymousToken(retryCount = 0): Promise<string> {
-   try {
-     const response = await fetch(`${SOPHNET_BASE_URL}/sys/login/anonymous`, {
-       method: "GET",
-       headers: {
-         "Accept": "application/json",
-         "User-Agent": getRandomUserAgent(), // use a random UA
-       },
-     });
-
-     // Retry on 429 or 5xx errors
-     if ((response.status === 429 || response.status >= 500) && retryCount < MAX_RETRIES) {
-       const delay = getExponentialBackoffDelay(retryCount);
-       console.warn(`Get token failed with status ${response.status}. Retrying in ${delay}ms... (${retryCount + 1}/${MAX_RETRIES})`);
-       await sleep(delay);
-       return getAnonymousToken(retryCount + 1);
-     }
-
-     if (!response.ok) {
-       throw new Error(`Failed to get token: ${response.status}`);
-     }
-
-     const data = await response.json() as AnonymousTokenResponse;
-     await storeTokenToKV(data.result.anonymousToken, data.result.expires);
-     return data.result.anonymousToken;
-   } catch (error) {
-     console.error("Error getting anonymous token:", error);
-     throw error;
    }
  }

- //
- async function
-
-   // If KV holds a token that has not expired, use it
-   if (tokenInfo && new Date(tokenInfo.expires) > new Date()) {
-     return tokenInfo.token;
    }

- //
-
- //
-
-     const response = await fetch(
-       `${SOPHNET_BASE_URL}/public/playground/models?projectUuid=${PROJECT_UUID}`,
        {
-         headers: {
-           "Accept": "application/json",
-           "User-Agent": getRandomUserAgent(), // use a random UA
-           "Authorization": `Bearer anon-${token}`,
-         },
-       },
-     );
-
-     // On 401 or 403, refresh the token and retry
-     if ((response.status === 401 || response.status === 403) && retryCount < MAX_RETRIES) {
-       console.log(`Token expired, refreshing and retrying models request (${retryCount + 1}/${MAX_RETRIES})...`);
-       const newToken = await getAnonymousToken();
-       return await getModels(newToken, retryCount + 1);
-     }
-
-     // Retry on 429 or 5xx errors
-     if ((response.status === 429 || response.status >= 500) && retryCount < MAX_RETRIES) {
-       const delay = getExponentialBackoffDelay(retryCount);
-       console.warn(`Get models failed with status ${response.status}. Retrying in ${delay}ms... (${retryCount + 1}/${MAX_RETRIES})`);
-       await sleep(delay);
-       return getModels(token, retryCount + 1); // retry with the current token; if that fails, the logic above refreshes it
-     }
-
-     if (!response.ok) {
-       throw new Error(`Failed to get models: ${response.status}`);
-     }
-
-     const data = await response.json() as ModelsResponse;
-
-     // After a successful request, fetch and store a new token (background refresh)
-     getAnonymousToken().catch(err => console.error("Background token refresh failed:", err));
-
-     return data.result;
-   } catch (error) {
-     console.error("Error getting models:", error);
-     throw error;
-   }
- }
-
- // Convert SophNet models to OpenAI format
- function transformModelsToOpenAIFormat(models: SophNetModel[]) {
-   const transformedModels = [];
-
-   // Create standard, search, and full-context variants for each model
-   for (const model of models) {
-     // Add the standard model
-     transformedModels.push({
-       id: model.modelFamily,
-       object: "model",
-       created: Date.now(),
-       owned_by: "sophnet",
-       permission: [{
-         id: `modelperm-${model.id}`,
          object: "model_permission",
-         created: Date.now(),
          allow_create_engine: false,
          allow_sampling: true,
-         allow_logprobs:
          allow_search_indices: false,
          allow_view: true,
          allow_fine_tuning: false,
          organization: "*",
          group: null,
          is_blocking: false,
-       }
-
-     // Add the search variant
-     transformedModels.push({
-       id: `${model.modelFamily}-Search`,
-       object: "model",
-       created: Date.now(),
-       owned_by: "sophnet",
-       permission: [{
-         id: `modelperm-${model.id}-Search`,
-         object: "model_permission",
-         created: Date.now(),
-         allow_create_engine: false,
-         allow_sampling: true,
-         allow_logprobs: false,
-         allow_search_indices: true,
-         allow_view: true,
-         allow_fine_tuning: false,
-         organization: "*",
-         group: null,
-         is_blocking: false,
-       }],
-       root: model.modelFamily,
-       parent: null,
-     });
-
-     // Add the full-context variant
-     transformedModels.push({
-       id: `${model.modelFamily}-Full-Context`,
-       object: "model",
-       created: Date.now(),
-       owned_by: "sophnet",
-       permission: [{
-         id: `modelperm-${model.id}-Full-Context`,
-         object: "model_permission",
-         created: Date.now(),
-         allow_create_engine: false,
-         allow_sampling: true,
-         allow_logprobs: false,
-         allow_search_indices: false,
-         allow_view: true,
-         allow_fine_tuning: false,
-         organization: "*",
-         group: null,
-         is_blocking: false,
-       }],
-       root: model.modelFamily,
-       parent: null,
-     });
-
-     // Add the full-context + search variant
-     transformedModels.push({
-       id: `${model.modelFamily}-Full-Context-Search`,
-       object: "model",
-       created: Date.now(),
-       owned_by: "sophnet",
-       permission: [{
-         id: `modelperm-${model.id}-Full-Context-Search`,
-         object: "model_permission",
-         created: Date.now(),
-         allow_create_engine: false,
-         allow_sampling: true,
-         allow_logprobs: false,
-         allow_search_indices: true,
-         allow_view: true,
-         allow_fine_tuning: false,
-         organization: "*",
-         group: null,
-         is_blocking: false,
-       }],
-       root: model.modelFamily,
-       parent: null,
-     });
-   }

-   return {
-   };
  }

- //
- function
-
-   // Extract system messages (if any)
-   const systemMessages = messagesCopy.filter(msg => msg.role === "system");
-
-   // Get the non-system messages
-   const nonSystemMessages = messagesCopy.filter(msg => msg.role !== "system");
-
-   // If there are no more than 3 pairs (6 messages), no processing is needed
-   if (nonSystemMessages.length <= 6) {
-     return messages;
    }

-   // Extract the history messages that need to be merged
-   const historyMessages = nonSystemMessages.slice(0, -6);
-
-   // Create a summary of the history messages
-   const historySummary = {
-     role: "user",
-     content: `这里是此前的对话上下文: ${JSON.stringify(historyMessages)}`
    };

-   //
- }
-
- // Convert a number to superscript form
- function convertToSuperscript(num: number): string {
-   const normalDigits = '0123456789';
-   const superscriptDigits = '⁰¹²³⁴⁵⁶⁷⁸⁹';
-
-   return num.toString()
-     .split('')
-     .map(char => {
-       const index = normalDigits.indexOf(char);
-       return index !== -1 ? superscriptDigits[index] : char;
-     })
-     .join('');
- }
-
- // Handle chat completion requests
- async function handleChatCompletions(
-   token: string,
-   requestBody: any,
-   stream: boolean,
-   retryCount = 0,
- ): Promise<Response> {
-   // Check the model name suffixes
-   const modelId = requestBody.model;
-   const webSearchEnable = modelId.includes("-Search");
-   const fullContextEnable = modelId.includes("-Full-Context");
-
-   // Determine the actual model ID from the suffixes
-   let actualModelId = modelId;
-   if (webSearchEnable) actualModelId = actualModelId.replace("-Search", "");
-   if (fullContextEnable) actualModelId = actualModelId.replace("-Full-Context", "");
-
-   // Process the messages
-   let processedMessages = requestBody.messages;
-   if (fullContextEnable) {
-     processedMessages = processFullContext(requestBody.messages);
-   }
-
-   const sophNetBody = {
-     temperature: requestBody.temperature || 0.7,
-     top_p: requestBody.top_p || 0.9,
-     frequency_penalty: requestBody.frequency_penalty || 0,
-     presence_penalty: requestBody.presence_penalty || 0,
-     max_tokens: requestBody.max_tokens || 2048,
-     webSearchEnable: webSearchEnable,
-     stop: requestBody.stop || [],
-     stream: stream.toString(),
-     model_id: actualModelId,
-     messages: processedMessages,
-   };

    try {
-     const
-
-         "Content-Type": "application/json",
-         "Authorization": `Bearer anon-${token}`,
-         "Accept": stream ? "text/event-stream" : "application/json",
-         "User-Agent": getRandomUserAgent(), // use a random UA
-       },
-       body: JSON.stringify(sophNetBody),
-       },
-     );
-
-     // On 401 or 403, refresh the token and retry
-     if ((response.status === 401 || response.status === 403) && retryCount < MAX_RETRIES) {
-       console.log(`Chat completion token expired, refreshing and retrying (${retryCount + 1}/${MAX_RETRIES})...`);
-       const newToken = await getAnonymousToken();
-       // Wait with exponential backoff
-       const delay = getExponentialBackoffDelay(retryCount);
-       await sleep(delay);
-       return await handleChatCompletions(newToken, requestBody, stream, retryCount + 1);
    }

-     //
-     if ((
-
-       console.warn(`Chat completion failed with status ${response.status}. Retrying in ${delay}ms... (${retryCount + 1}/${MAX_RETRIES})`);
-       await sleep(delay);
-       return handleChatCompletions(token, requestBody, stream, retryCount + 1); // retry with the current token; if that fails, the logic above refreshes it
    }

    }

-       console.warn(`Chat completion network error. Retrying in ${delay}ms... (${retryCount + 1}/${MAX_RETRIES})`);
-       await sleep(delay);
-       return handleChatCompletions(token, requestBody, stream, retryCount + 1);
-     }
-     throw error; // rethrow once the maximum retries are exhausted
-   }
- }

-             model: "sophnet-model", // either the model name returned by SophNet or a fixed one
-             choices: [
-               {
-                 index: 0,
-                 delta: {
-                   content: `\n\n${referencesSection}`,
-                 },
-                 finish_reason: null, // finish_reason is usually null while sending references
-               },
-             ],
-           })}\n\n`;
-           referencesEmitted = true;
-         }
-         break;
        }

-               index: 0,
                delta: {
                },
-               finish_reason:
-             },
    }

-               choices: [
-                 {
-                   index: 0,
-                   delta: {
-                     // SophNet's citation info is usually not in the content, so add it manually
-                     content: `[${superscriptIndex}](${ref.url})`,
-                   },
-                   finish_reason: null, // citation events usually carry no finish_reason
-                 },
-               ],
-             })}\n\n`;
            }
-
-           // Convert to an OpenAI-format event
-           const openAIEvent = {
-             id: sophNetEvent.id || `chatcmpl-${Date.now()}`, // use the SophNet ID or generate a new one
-             object: "chat.completion.chunk",
-             created: Math.floor(Date.now() / 1000),
-             model: sophNetEvent.model || "sophnet-model", // use the SophNet model name or a fixed one
-             choices: [
-               {
-                 index: 0,
-                 delta: {
-                   // OpenAI usually puts the content in delta.content
-                   reasoning_content: sophNetEvent.choices?.[0]?.delta?.reasoning_content || "",
-                   content: sophNetEvent.choices?.[0]?.delta?.content || "",
-                 },
-                 finish_reason: sophNetEvent.choices?.[0]?.finish_reason || null,
-               },
-             ],
-           };

    }
  }
    }
  }

- //
- function
-
-   });
-
-   return section;
  }

- //
-
-   if (sophNetResponse.choices?.[0]?.message?.refs && sophNetResponse.choices[0].message.refs.length > 0) {
-     for (const ref of sophNetResponse.choices[0].message.refs) {
-       references.push(ref);
-     }
-
-     // Add a superscript marker for each reference
-     references.forEach((ref, index) => {
-       const refIndex = index + 1;
-       const superscriptIndex = `⁽${convertToSuperscript(refIndex)}⁾`;
-       // Append a citation marker at the end of the content
-       content += ` [${superscriptIndex}](${ref.url})`;
-     });
-
-     // Append the references section
-     if (references.length > 0) {
-       content += "\n\n" + generateReferencesSection(references);
-     }
-   }
-
-   return {
-     id: sophNetResponse.id || `chatcmpl-${Date.now()}`, // use the SophNet ID or generate a new one
-     object: "chat.completion",
-     created: Math.floor(Date.now() / 1000),
-     model: sophNetResponse.model || "sophnet-model", // use the SophNet model name or a fixed one
-     choices: [
-       {
-         index: 0,
-         message: {
-           role: "assistant",
-           reasoning_content: sophNetResponse.choices?.[0]?.message?.reasoning_content || "",
-           content: content,
-         },
-         finish_reason: sophNetResponse.choices?.[0]?.finish_reason || "stop",
        },
-
-       prompt_tokens: 0,
-       completion_tokens: 0,
-       total_tokens: 0,
-     },
-   };
  }

- //
  async function handler(req: Request): Promise<Response> {
    const url = new URL(req.url);
-   const path = url.pathname;
-
-   // Handle CORS preflight requests
-   if (req.method === "OPTIONS") {
-     return new Response(null, {
-       status: 204,
-       headers: {
-         "Access-Control-Allow-Origin": "*",
-         "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
-         "Access-Control-Allow-Headers": "Content-Type, Authorization",
-         "Access-Control-Max-Age": "86400",
-       },
-     });
-   }
-
-   // Get a valid token
-   let token;
-   try {
-     token = await getValidToken();
-   } catch (error) {
-     console.error("Failed to get token in handler:", error);
-     return new Response(
-       JSON.stringify({ error: "Failed to get token", details: error.message }),
-       {
-         status: 500,
-         headers: {
-           "Content-Type": "application/json",
-           "Access-Control-Allow-Origin": "*",
-         },
-       },
-     );
-   }
-
-   try {
-     // Models list endpoint
-     if (path === "/v1/models" && req.method === "GET") {
-       const models = await getModels(token);
-       const openAIModels = transformModelsToOpenAIFormat(models);

-       });
-     }
-
-     // Chat completions endpoint
-     else if (path === "/v1/chat/completions" && req.method === "POST") {
-       const requestBody = await req.json();
-       const stream = requestBody.stream === true;
-
-       const sophNetResponse = await handleChatCompletions(token, requestBody, stream);
-
-       if (stream) {
-         const transformedStream = new ReadableStream({
-           async start(controller) {
-             try {
-               for await (const chunk of transformStreamResponse(sophNetResponse.body!)) {
-                 controller.enqueue(new TextEncoder().encode(chunk));
-               }
-               controller.close();
-             } catch (error) {
-               console.error("Stream transformation error:", error);
-               // Send the error information in the stream
-               const errorData = JSON.stringify({
-                 error: {
-                   message: `Stream processing error: ${error.message}`,
-                   type: "stream_error",
-                   code: null,
-                 }
-               });
-               controller.enqueue(new TextEncoder().encode(`data: ${errorData}\n\n`));
-               controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"));
-               controller.close();
-             }
-           },
-         });
-
-         return new Response(transformedStream, {
-           headers: {
-             "Content-Type": "text/event-stream",
-             "Cache-Control": "no-cache",
-             "Connection": "keep-alive",
-             "Access-Control-Allow-Origin": "*",
-           },
-         });
-       } else {
-         const transformedResponse = await transformNonStreamResponse(sophNetResponse);
-
-         return new Response(JSON.stringify(transformedResponse), {
-           status: 200,
-           headers: {
-             "Content-Type": "application/json",
-             "Access-Control-Allow-Origin": "*",
-           },
-         });
-       }
-     }
-
-     // Route not found
-     else {
-       return new Response(
-         JSON.stringify({ error: "Not found", message: "Endpoint not supported" }),
-         {
-           status: 404,
-           headers: {
-             "Content-Type": "application/json",
-             "Access-Control-Allow-Origin": "*",
-           },
-         },
-       );
-     }
-   } catch (error) {
-     console.error("Request handling error:", error);
-
-     // Return a uniform error response format
-     return new Response(
-       JSON.stringify({
-         error: {
-           message: error.message,
-           type: "api_error", // or a more specific error type
-           code: error.status || null, // include the status code for HTTP errors
-         }
-       }),
-       {
-         status: error.status || 500, // use the error status code or default to 500
-         headers: {
-           "Content-Type": "application/json",
-           "Access-Control-Allow-Origin": "*",
-         },
-       },
-     );
    }
  }

- serve(handler, { port: PORT });

+ import { serve } from "https://deno.land/std@0.208.0/http/server.ts";
+
+ // Julep API Base URL (fixed)
+ const JULEP_API_BASE = "https://api.julep.ai/api";
+
+ // Hardcoded list of models (Agent IDs in this context)
+ const HARDCODED_MODELS = [
+   'mistral-large-2411', 'o1', 'text-embedding-3-large', 'vertex_ai/text-embedding-004',
+   'claude-3.5-haiku', 'cerebras/llama-4-scout-17b-16e-instruct', 'llama-3.1-8b',
+   'magnum-v4-72b', 'voyage-multilingual-2', 'claude-3-haiku', 'gpt-4o',
+   'BAAI/bge-m3', 'openrouter/meta-llama/llama-4-maverick', 'openrouter/meta-llama/llama-4-scout',
+   'claude-3.5-sonnet', 'hermes-3-llama-3.1-70b', 'claude-3.5-sonnet-20240620',
+   'qwen-2.5-72b-instruct', 'l3.3-euryale-70b', 'gpt-4o-mini', 'cerebras/llama-3.3-70b',
+   'o1-preview', 'gemini-1.5-pro-latest', 'l3.1-euryale-70b', 'claude-3-sonnet',
+   'Alibaba-NLP/gte-large-en-v1.5', 'openrouter/meta-llama/llama-4-scout:free',
+   'llama-3.1-70b', 'eva-qwen-2.5-72b', 'claude-3.5-sonnet-20241022', 'gemini-2.0-flash',
+   'deepseek-chat', 'o1-mini', 'eva-llama-3.33-70b', 'gemini-2.5-pro-preview-03-25',
+   'gemini-1.5-pro', 'gpt-4-turbo', 'openrouter/meta-llama/llama-4-maverick:free',
+   'o3-mini', 'claude-3.7-sonnet', 'voyage-3', 'cerebras/llama-3.1-8b', 'claude-3-opus'
+ ];
+
+ // Helper function to get Julep API Key from Authorization header
+ function getJulepApiKey(req: Request): string | null {
+   const authHeader = req.headers.get("Authorization");
+   if (authHeader && authHeader.startsWith("Bearer ")) {
+     return authHeader.substring(7); // Extract the token after "Bearer "
    }
+   return null;
  }

+ // OpenAI Models endpoint handler (hardcoded)
+ async function handleModels(req: Request): Promise<Response> {
+   const julepApiKey = getJulepApiKey(req);
+   if (!julepApiKey) {
+     return new Response("Unauthorized: Missing or invalid Authorization header", { status: 401 });
    }

+   // Format hardcoded models into OpenAI models format
+   const openaiModels = HARDCODED_MODELS.map((modelId) => ({
+     id: modelId,
+     object: "model",
+     created: Math.floor(Date.now() / 1000), // Use current time for creation
+     owned_by: "julep", // Or "openai" if you prefer
+     permission: [
      {
+       id: `modelperm-${modelId}`,
        object: "model_permission",
+       created: Math.floor(Date.now() / 1000),
        allow_create_engine: false,
        allow_sampling: true,
+       allow_logprobs: true,
        allow_search_indices: false,
        allow_view: true,
        allow_fine_tuning: false,
        organization: "*",
        group: null,
        is_blocking: false,
+     },
+     ],
+     root: modelId,
+     parent: null,
+   }));

+   return new Response(JSON.stringify({ data: openaiModels, object: "list" }), {
+     headers: { "Content-Type": "application/json" },
+     status: 200,
+   });
  }

+ // OpenAI Chat Completions endpoint handler
+ async function handleChatCompletions(req: Request): Promise<Response> {
+   const julepApiKey = getJulepApiKey(req);
+   if (!julepApiKey) {
+     return new Response("Unauthorized: Missing or invalid Authorization header", { status: 401 });
    }

+   const headers = {
+     "Authorization": `Bearer ${julepApiKey}`,
+     "Content-Type": "application/json",
    };

+   let agentId: string | null = null; // Variable to store the created agent ID
+   let sessionId: string | null = null; // Variable to store the created session ID

    try {
+     const requestBody = await req.json();
+     const { model, messages, stream, ...rest } = requestBody;
+
+     if (!model || !messages || !Array.isArray(messages) || messages.length === 0) {
+       return new Response("Invalid request body. 'model' and 'messages' are required.", { status: 400 });
      }

+     // Check if the requested model is in our hardcoded list
+     if (!HARDCODED_MODELS.includes(model)) {
+       return new Response(`Invalid model: ${model}. Please use one of the available models.`, { status: 400 });
      }

+     // 1. Create a new Agent for this request
+     const createAgentUrl = `${JULEP_API_BASE}/agents`;
+     const createAgentBody = {
+       name: model, // Set agent name to the model value
+       model: model, // Use the requested OpenAI model as the Julep Agent's model
+       about: model, // Set agent about to the model value
+       instructions: ["Follow user instructions carefully."], // Keep some default instructions
+     };
+
+     const createAgentResponse = await fetch(createAgentUrl, {
+       method: "POST",
+       headers,
+       body: JSON.stringify(createAgentBody),
+     });
+
+     if (!createAgentResponse.ok) {
+       const errorText = await createAgentResponse.text();
+       console.error(`Error creating Julep Agent: ${createAgentResponse.status} - ${errorText}`);
+       return new Response(`Error creating Julep Agent: ${createAgentResponse.statusText}`, { status: createAgentResponse.status });
      }

+     const agentData = await createAgentResponse.json();
+     agentId = agentData.id; // Store the agent ID

+     // 2. Create a Session using the new Agent ID
+     const createSessionUrl = `${JULEP_API_BASE}/sessions`;
+     const createSessionBody = {
+       agent: agentId, // Use the newly created Agent ID
+       // You can add other Session creation parameters here if needed
+     };

+     const createSessionResponse = await fetch(createSessionUrl, {
+       method: "POST",
+       headers,
+       body: JSON.stringify(createSessionBody),
+     });

+     if (!createSessionResponse.ok) {
+       const errorText = await createSessionResponse.text();
+       console.error(`Error creating Julep Session: ${createSessionResponse.status} - ${errorText}`);
+       // Attempt to clean up the temporary agent
+       if (agentId) {
+         fetch(`${JULEP_API_BASE}/agents/${agentId}`, { method: "DELETE", headers }).catch(console.error);
+       }
+       return new Response(`Error creating Julep Session: ${createSessionResponse.statusText}`, { status: createSessionResponse.status });
+     }

+     const sessionData = await createSessionResponse.json();
+     sessionId = sessionData.id; // Store the session ID
+
+     // 3. Perform Chat Completion
+     const chatUrl = `${JULEP_API_BASE}/sessions/${sessionId}/chat`;
+     const chatBody = {
+       messages: messages.map((msg: any) => ({
+         role: msg.role,
+         content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content), // Handle potential object content
+         // Map other relevant fields if necessary
+       })),
+       stream: stream === true,
+       ...rest, // Forward any other parameters from the OpenAI request
+     };
+
+     const chatResponse = await fetch(chatUrl, {
+       method: "POST",
+       headers,
+       body: JSON.stringify(chatBody),
+     });

+     // 4. Handle Response and Clean Up
+     if (!chatResponse.ok) {
+       // If the chat request itself fails, read the error body and then clean up
+       const errorText = await chatResponse.text();
+       console.error(`Error during Julep Chat Completion: ${chatResponse.status} - ${errorText}`);
+       // Attempt to clean up the temporary agent and session
+       if (sessionId) {
+         fetch(`${JULEP_API_BASE}/sessions/${sessionId}`, { method: "DELETE", headers }).catch(console.error);
+       }
+       if (agentId) {
+         fetch(`${JULEP_API_BASE}/agents/${agentId}`, { method: "DELETE", headers }).catch(console.error);
      }
+       return new Response(`Error during Julep Chat Completion: ${chatResponse.statusText} - ${errorText}`, { status: chatResponse.status });
+     }

+     if (stream) {
+       // Handle streaming response (Server-Sent Events)
+       // Pipe the Julep response body directly to the client response body
+       // and add cleanup to the end of the stream.
+       const readableStream = chatResponse.body!.pipeThrough(new TextDecoderStream()).pipeThrough(new TransformStream({
+         transform(chunk, controller) {
+           // Parse Julep streaming chunks and format as OpenAI SSE
+           const lines = chunk.split('\n').filter(line => line.trim() !== '');
+           for (const line of lines) {
+             if (line.startsWith('data:')) {
+               const data = JSON.parse(line.substring(5).trim());
+               // Format the Julep chunk data into OpenAI SSE format
+               const openaiChunk = {
+                 id: data.id,
+                 object: "chat.completion.chunk",
+                 created: Math.floor(new Date(data.created_at).getTime() / 1000),
+                 model: model, // Use the requested model ID
+                 choices: data.choices.map((choice: any) => ({
+                   index: choice.index,
                    delta: {
+                     role: choice.delta.role,
+                     content: choice.delta.content,
+                     tool_calls: choice.delta.tool_calls ? toolCallDeltaToOpenAI(choice.delta.tool_calls) : undefined,
                    },
+                   finish_reason: choice.finish_reason,
+                 })),
+               };
+               controller.enqueue(`data: ${JSON.stringify(openaiChunk)}\n\n`);
+             } else {
+               // Pass through non-data lines like comments or empty lines if needed
+               controller.enqueue(`${line}\n`);
+             }
            }
+         },
+       }));
+
+       // Attach cleanup to the end of the stream.
+       // We need to duplicate the stream to be able to pipe it to the client response
+       // AND to a WritableStream for cleanup.
+       const [stream1, stream2] = readableStream.tee();
+
+       const cleanupPromise = new Promise<void>((resolve, reject) => {
+         stream2.pipeTo(new WritableStream({
+           close: () => {
+             if (sessionId) {
+               fetch(`${JULEP_API_BASE}/sessions/${sessionId}`, { method: "DELETE", headers }).catch(console.error);
+             }
+             if (agentId) {
+               fetch(`${JULEP_API_BASE}/agents/${agentId}`, { method: "DELETE", headers }).catch(console.error);
+             }
+             resolve();
+           },
+           abort: (reason) => {
+             console.error("Stream aborted:", reason);
+             if (sessionId) {
+               fetch(`${JULEP_API_BASE}/sessions/${sessionId}`, { method: "DELETE", headers }).catch(console.error);
+             }
+             if (agentId) {
+               fetch(`${JULEP_API_BASE}/agents/${agentId}`, { method: "DELETE", headers }).catch(console.error);
+             }
+             reject(reason);
            }
+         })).catch(reject);
+       });

+       // Return the response with the first stream.
+       return new Response(stream1, {
+         headers: {
+           "Content-Type": "text/event-stream",
+           "Cache-Control": "no-cache",
+           "Connection": "keep-alive",
+         },
+         status: 200,
+       });
+
+     } else {
+       // Handle non-streaming response
+       const julepChatData = await chatResponse.json();
+
+       const openaiCompletion = {
+         id: julepChatData.id,
+         object: "chat.completion",
+         created: Math.floor(new Date(julepChatData.created_at).getTime() / 1000),
+         model: model, // Use the requested model ID
+         choices: julepChatData.choices.map((choice: any) => ({
+           index: choice.index,
+           message: {
+             role: choice.message.role,
+             content: choice.message.content,
+             tool_calls: choice.message.tool_calls ? toolCallMessageToOpenAI(choice.message.tool_calls) : undefined,
+           },
+           finish_reason: choice.finish_reason,
+         })),
+         usage: julepChatData.usage ? {
+           prompt_tokens: julepChatData.usage.prompt_tokens,
+           completion_tokens: julepChatData.usage.completion_tokens,
+           total_tokens: julepChatData.usage.total_tokens,
+         } : undefined,
+       };
+
+       // Attempt to clean up the temporary agent and session (fire and forget)
+       if (sessionId) {
+         fetch(`${JULEP_API_BASE}/sessions/${sessionId}`, { method: "DELETE", headers }).catch(console.error);
+       }
+       if (agentId) {
+         fetch(`${JULEP_API_BASE}/agents/${agentId}`, { method: "DELETE", headers }).catch(console.error);
        }
+
+       return new Response(JSON.stringify(openaiCompletion), {
+         headers: { "Content-Type": "application/json" },
+         status: 200,
+       });
      }
+
+   } catch (error) {
+     console.error("Error handling chat completions request:", error);
+     // Attempt to clean up any agent or session created before the error
+     if (sessionId) {
+       fetch(`${JULEP_API_BASE}/sessions/${sessionId}`, { method: "DELETE", headers }).catch(console.error);
+     }
+     if (agentId) {
+       fetch(`${JULEP_API_BASE}/agents/${agentId}`, { method: "DELETE", headers }).catch(console.error);
+     }
+     return new Response("Internal Server Error", { status: 500 });
    }
  }

+ // Helper to format Julep ToolCall delta to OpenAI format
+ function toolCallDeltaToOpenAI(julepToolCalls: any[]): any[] {
+   return julepToolCalls.map(toolCall => {
+     // Assuming Julep's delta format for tool_calls is similar to the message format
+     // and contains function objects directly. Adjust if necessary.
+     return {
+       id: toolCall.id,
+       type: "function",
+       function: {
+         name: toolCall.function?.name,
+         arguments: toolCall.function?.arguments, // Arguments might be streamed as chunks
+       },
+     };
+   });
  }

+ // Helper to format Julep ToolCall message to OpenAI format
+ function toolCallMessageToOpenAI(julepToolCalls: any[]): any[] {
+   return julepToolCalls.map(toolCall => {
+     return {
+       id: toolCall.id,
+       type: "function",
+       function: {
+         name: toolCall.function?.name,
+         arguments: toolCall.function?.arguments, // Arguments should be complete in non-streaming
        },
+     };
+   });
  }

+ // Main request handler
  async function handler(req: Request): Promise<Response> {
    const url = new URL(req.url);

+   if (url.pathname === "/v1/models" && req.method === "GET") {
+     return handleModels(req);
+   } else if (url.pathname === "/v1/chat/completions" && req.method === "POST") {
+     return handleChatCompletions(req);
+   } else {
+     return new Response("Not Found", { status: 404 });
    }
  }

+ console.log(`HTTP server running on http://localhost:7860`);
+ serve(handler, { port: 7860 });
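
For reference, a minimal client sketch for the rewritten proxy, not part of the commit. It assumes the server above is running locally on port 7860, that the environment variable JULEP_API_KEY (a name chosen here for illustration) holds a valid Julep API key, and that "gpt-4o-mini" is used simply as one entry from HARDCODED_MODELS; run it with Deno.

// Client sketch (assumptions: proxy on localhost:7860, JULEP_API_KEY set in the environment).
const BASE = "http://localhost:7860";
const apiKey = Deno.env.get("JULEP_API_KEY") ?? "";

// List the hardcoded models exposed by /v1/models.
const modelsRes = await fetch(`${BASE}/v1/models`, {
  headers: { "Authorization": `Bearer ${apiKey}` },
});
const models = await modelsRes.json();
console.log(models.data.map((m: { id: string }) => m.id));

// Non-streaming chat completion via /v1/chat/completions.
const chatRes = await fetch(`${BASE}/v1/chat/completions`, {
  method: "POST",
  headers: {
    "Authorization": `Bearer ${apiKey}`,
    "Content-Type": "application/json",
  },
  body: JSON.stringify({
    model: "gpt-4o-mini", // must be one of HARDCODED_MODELS
    messages: [{ role: "user", content: "Say hello in one sentence." }],
    stream: false,
  }),
});
const completion = await chatRes.json();
console.log(completion.choices[0].message.content);

Because the proxy creates a temporary Julep agent and session for every request and deletes them afterwards, each call above is self-contained; the only state the client needs to supply is the API key.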