ERROR418 committed on
Commit 9e30bf5 · verified · 1 Parent(s): c5b2aac

Update main.ts

Files changed (1)
  1. main.ts +353 -715
main.ts CHANGED
@@ -1,799 +1,437 @@
1
- import { serve } from "https://deno.land/[email protected]/http/server.ts";
 
2
 
3
- // 定义常量
4
- const SOPHNET_BASE_URL = "https://www.sophnet.com/api";
5
- const PROJECT_UUID = "Ar79PWUQUAhjJOja2orHs";
6
- const PORT = 7860;
7
- const TOKEN_KEY = "sophnet_anonymous_token";
8
- const MAX_RETRIES = 5; // 增加最大重试次数
9
- const INITIAL_RETRY_DELAY_MS = 100; // 初始重试延迟(毫秒)
10
- const MAX_RETRY_DELAY_MS = 5000; // 最大重试延迟(毫秒)
11
 
12
- // 初始化Deno KV
13
- const kv = await Deno.openKv();
14
-
15
- // 定义接口
16
- interface AnonymousTokenResponse {
17
- status: number;
18
- message: string;
19
- result: {
20
- anonymousToken: string;
21
- expires: string;
22
- };
23
- timestamp: number;
24
- }
25
-
26
- interface SophNetModel {
27
- id: number;
28
- serviceUuid: string | null;
29
- projectUuid: string;
30
- displayName: string;
31
- modelFamily: string;
32
- available: boolean;
33
- isBaseModel: boolean;
34
- features: any;
35
- supportedStream: boolean;
36
- supportedImageInputs: boolean;
37
- schema: Array<{
38
- name: string;
39
- displayName: string;
40
- des: string;
41
- type: string;
42
- range: number[];
43
- defaultValue: number;
44
- required: boolean;
45
- }>;
46
- }
47
-
48
- interface ModelsResponse {
49
- status: number;
50
- message: string;
51
- result: SophNetModel[];
52
- timestamp: number;
53
- }
54
-
55
- interface TokenInfo {
56
- token: string;
57
- expires: string;
58
- }
59
-
60
- interface Message {
61
- role: string;
62
- content: string;
63
- }
64
-
65
- interface Reference {
66
- content: string;
67
- id: string;
68
- index: number;
69
- title: string;
70
- type: string;
71
- url: string;
72
- }
73
-
74
- // 随机生成一个用户代理字符串
75
- function getRandomUserAgent(): string {
76
- const userAgents = [
77
- "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36",
78
- "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.1 Safari/605.1.15",
79
- "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.2151.44",
80
- "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
81
- "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
82
- "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
83
- "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/119.0",
84
- "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:109.0) Gecko/20100101 Firefox/119.0",
85
- "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
86
- ];
87
- return userAgents[Math.floor(Math.random() * userAgents.length)];
88
- }
89
-
90
- // 计算指数退避延迟
91
- function getExponentialBackoffDelay(retryCount: number): number {
92
- const delay = INITIAL_RETRY_DELAY_MS * Math.pow(2, retryCount);
93
- const jitter = Math.random() * INITIAL_RETRY_DELAY_MS; // 添加随机抖动
94
- return Math.min(delay + jitter, MAX_RETRY_DELAY_MS);
95
- }
96
-
97
- // 延迟函数
98
- function sleep(ms: number): Promise<void> {
99
- return new Promise(resolve => setTimeout(resolve, ms));
100
- }
101
-
102
- // 从KV获取token
103
- async function getTokenFromKV(): Promise<TokenInfo | null> {
104
- const tokenEntry = await kv.get<TokenInfo>([TOKEN_KEY]);
105
- return tokenEntry.value;
106
- }
107
-
108
- // 存储token到KV
109
- async function storeTokenToKV(token: string, expires: string): Promise<void> {
110
- await kv.set([TOKEN_KEY], { token, expires });
111
- }
112
-
113
- // 获取匿名token
114
- async function getAnonymousToken(retryCount = 0): Promise<string> {
115
  try {
116
- const response = await fetch(`${SOPHNET_BASE_URL}/sys/login/anonymous`, {
117
- method: "GET",
118
  headers: {
119
- "Accept": "application/json",
120
- "User-Agent": getRandomUserAgent(), // 使用随机UA
121
  },
 
122
  });
123
 
124
- // 如果是 429 或 500 错误,进行重试
125
- if ((response.status === 429 || response.status >= 500) && retryCount < MAX_RETRIES) {
126
- const delay = getExponentialBackoffDelay(retryCount);
127
- console.warn(`Get token failed with status ${response.status}. Retrying in ${delay}ms... (${retryCount + 1}/${MAX_RETRIES})`);
128
- await sleep(delay);
129
- return getAnonymousToken(retryCount + 1);
130
- }
131
-
132
  if (!response.ok) {
133
- throw new Error(`Failed to get token: ${response.status}`);
134
  }
135
 
136
- const data = await response.json() as AnonymousTokenResponse;
137
- await storeTokenToKV(data.result.anonymousToken, data.result.expires);
138
- return data.result.anonymousToken;
139
  } catch (error) {
140
- console.error("Error getting anonymous token:", error);
141
- throw error;
142
  }
143
  }
144
 
145
- // 获取有效token
146
- async function getValidToken(): Promise<string> {
147
- // 先尝试从KV获取
148
- const tokenInfo = await getTokenFromKV();
149
-
150
- // 如果KV中有token且未过期,则使用该token
151
- if (tokenInfo && new Date(tokenInfo.expires) > new Date()) {
152
- return tokenInfo.token;
153
- }
154
-
155
- // 否则获取新token
156
- return await getAnonymousToken();
157
  }
158
 
159
- // 获取模型列表
160
- async function getModels(token: string, retryCount = 0): Promise<SophNetModel[]> {
161
  try {
162
- const response = await fetch(
163
- `${SOPHNET_BASE_URL}/public/playground/models?projectUuid=${PROJECT_UUID}`,
164
- {
165
- method: "GET",
166
- headers: {
167
- "Accept": "application/json",
168
- "User-Agent": getRandomUserAgent(), // 使用随机UA
169
- "Authorization": `Bearer anon-${token}`,
170
- },
171
- },
172
- );
173
-
174
- // 如果是401或403错误,尝试刷新token并重试
175
- if ((response.status === 401 || response.status === 403) && retryCount < MAX_RETRIES) {
176
- console.log(`Token expired, refreshing and retrying models request (${retryCount + 1}/${MAX_RETRIES})...`);
177
- const newToken = await getAnonymousToken();
178
- return await getModels(newToken, retryCount + 1);
179
  }
180
 
181
- // 如果是 429 或 500 错误,进行重试
182
- if ((response.status === 429 || response.status >= 500) && retryCount < MAX_RETRIES) {
183
- const delay = getExponentialBackoffDelay(retryCount);
184
- console.warn(`Get models failed with status ${response.status}. Retrying in ${delay}ms... (${retryCount + 1}/${MAX_RETRIES})`);
185
- await sleep(delay);
186
- return getModels(token, retryCount + 1); // 使用当前token重试,如果失败会在上面的逻辑中刷新
187
  }
188
-
189
- if (!response.ok) {
190
- throw new Error(`Failed to get models: ${response.status}`);
191
  }
192
-
193
- const data = await response.json() as ModelsResponse;
194
-
195
- // 请求成功后获取新token并存储 (后台刷新)
196
- getAnonymousToken().catch(err => console.error("Background token refresh failed:", err));
197
-
198
- return data.result;
 
199
  } catch (error) {
200
- console.error("Error getting models:", error);
201
- throw error;
202
  }
203
  }
204
 
205
- // 将SophNet模型转换为OpenAI格式
206
- function transformModelsToOpenAIFormat(models: SophNetModel[]) {
207
- const transformedModels = [];
208
-
209
- // 为每个模型创建标准版本、搜索版本和全上下文版本
210
- for (const model of models) {
211
- // 添加标准模型
212
- transformedModels.push({
213
- id: model.modelFamily,
214
- object: "model",
215
- created: Date.now(),
216
- owned_by: "sophnet",
217
- permission: [{
218
- id: `modelperm-${model.id}`,
219
- object: "model_permission",
220
- created: Date.now(),
221
- allow_create_engine: false,
222
- allow_sampling: true,
223
- allow_logprobs: false,
224
- allow_search_indices: false,
225
- allow_view: true,
226
- allow_fine_tuning: false,
227
- organization: "*",
228
- group: null,
229
- is_blocking: false,
230
- }],
231
- root: model.modelFamily,
232
- parent: null,
233
- });
234
-
235
- // 添加搜索版本模型
236
- transformedModels.push({
237
- id: `${model.modelFamily}-Search`,
238
- object: "model",
239
- created: Date.now(),
240
- owned_by: "sophnet",
241
- permission: [{
242
- id: `modelperm-${model.id}-Search`,
243
- object: "model_permission",
244
- created: Date.now(),
245
- allow_create_engine: false,
246
- allow_sampling: true,
247
- allow_logprobs: false,
248
- allow_search_indices: true,
249
- allow_view: true,
250
- allow_fine_tuning: false,
251
- organization: "*",
252
- group: null,
253
- is_blocking: false,
254
- }],
255
- root: model.modelFamily,
256
- parent: null,
257
- });
258
 
259
- // 添加全上下文版本模型
260
- transformedModels.push({
261
- id: `${model.modelFamily}-Full-Context`,
262
- object: "model",
263
- created: Date.now(),
264
- owned_by: "sophnet",
265
- permission: [{
266
- id: `modelperm-${model.id}-Full-Context`,
267
- object: "model_permission",
268
- created: Date.now(),
269
- allow_create_engine: false,
270
- allow_sampling: true,
271
- allow_logprobs: false,
272
- allow_search_indices: false,
273
- allow_view: true,
274
- allow_fine_tuning: false,
275
- organization: "*",
276
- group: null,
277
- is_blocking: false,
278
- }],
279
- root: model.modelFamily,
280
- parent: null,
281
  });
282
 
283
- // 添加全上下文+搜索版本模型
284
- transformedModels.push({
285
- id: `${model.modelFamily}-Full-Context-Search`,
286
- object: "model",
287
- created: Date.now(),
288
- owned_by: "sophnet",
289
- permission: [{
290
- id: `modelperm-${model.id}-Full-Context-Search`,
291
- object: "model_permission",
292
- created: Date.now(),
293
- allow_create_engine: false,
294
- allow_sampling: true,
295
- allow_logprobs: false,
296
- allow_search_indices: true,
297
- allow_view: true,
298
- allow_fine_tuning: false,
299
- organization: "*",
300
- group: null,
301
- is_blocking: false,
302
- }],
303
- root: model.modelFamily,
304
- parent: null,
305
  });
306
- }
307
 
308
- return {
309
- object: "list",
310
- data: transformedModels,
311
- };
312
- }
313
-
314
- // 处理全上下文功能
315
- function processFullContext(messages: Message[]): Message[] {
316
- // 复制消息数组,避免修改原数组
317
- const messagesCopy = [...messages];
318
-
319
- // 提取系统消息(如果存在)
320
- const systemMessages = messagesCopy.filter(msg => msg.role === "system");
321
-
322
- // 获取非系统消息
323
- const nonSystemMessages = messagesCopy.filter(msg => msg.role !== "system");
324
 
325
- // 如果消息总数少于或等于3对(6条消息),则不需要处理
326
- if (nonSystemMessages.length <= 6) {
327
- return messages;
328
  }
329
-
330
- // 提取最后3轮对话(最多6条消息)
331
- const recentMessages = nonSystemMessages.slice(-6);
332
-
333
- // 提取需要合并的历史消息
334
- const historyMessages = nonSystemMessages.slice(0, -6);
335
-
336
- // 创建历史消息的摘要
337
- const historySummary = {
338
- role: "user",
339
- content: `这里是此前的对话上下文: ${JSON.stringify(historyMessages)}`
340
- };
341
-
342
- // 组合新的消息数组:系统消息 + 历史摘要 + 最近消息
343
- return [...systemMessages, historySummary, ...recentMessages];
344
  }
345
 
346
- // 将数字转换为上标形式
347
- function convertToSuperscript(num: number): string {
348
- const normalDigits = '0123456789';
349
- const superscriptDigits = '⁰¹²³⁴⁵⁶⁷⁸⁹';
350
-
351
- return num.toString()
352
- .split('')
353
- .map(char => {
354
- const index = normalDigits.indexOf(char);
355
- return index !== -1 ? superscriptDigits[index] : char;
356
- })
357
- .join('');
358
- }
359
-
360
- // 处理聊天完成请求
361
- async function handleChatCompletions(
362
- token: string,
363
- requestBody: any,
364
- stream: boolean,
365
- retryCount = 0,
366
- ): Promise<Response> {
367
- // 检查模型名称的后缀
368
- const modelId = requestBody.model;
369
- const webSearchEnable = modelId.includes("-Search");
370
- const fullContextEnable = modelId.includes("-Full-Context");
371
-
372
- // 根据后缀确定实际模型ID
373
- let actualModelId = modelId;
374
- if (webSearchEnable) actualModelId = actualModelId.replace("-Search", "");
375
- if (fullContextEnable) actualModelId = actualModelId.replace("-Full-Context", "");
376
-
377
- // 处理消息
378
- let processedMessages = requestBody.messages;
379
- if (fullContextEnable) {
380
- processedMessages = processFullContext(requestBody.messages);
381
- }
382
-
383
- const sophNetBody = {
384
- temperature: requestBody.temperature || 0.7,
385
- top_p: requestBody.top_p || 0.9,
386
- frequency_penalty: requestBody.frequency_penalty || 0,
387
- presence_penalty: requestBody.presence_penalty || 0,
388
- max_tokens: requestBody.max_tokens || 2048,
389
- webSearchEnable: webSearchEnable,
390
- stop: requestBody.stop || [],
391
- stream: stream.toString(),
392
- model_id: actualModelId,
393
- messages: processedMessages,
394
- };
395
-
396
- try {
397
- const response = await fetch(
398
- `${SOPHNET_BASE_URL}/open-apis/projects/${PROJECT_UUID}/chat/completions`,
399
  {
400
- method: "POST",
401
- headers: {
402
- "Content-Type": "application/json",
403
- "Authorization": `Bearer anon-${token}`,
404
- "Accept": stream ? "text/event-stream" : "application/json",
405
- "User-Agent": getRandomUserAgent(), // 使用随机UA
406
  },
407
- body: JSON.stringify(sophNetBody),
408
  },
409
- );
410
-
411
- // 如果是401或403错误,尝试刷新token并重试
412
- if ((response.status === 401 || response.status === 403) && retryCount < MAX_RETRIES) {
413
- console.log(`Chat completion token expired, refreshing and retrying (${retryCount + 1}/${MAX_RETRIES})...`);
414
- const newToken = await getAnonymousToken();
415
- // 使用指数退避等待
416
- const delay = getExponentialBackoffDelay(retryCount);
417
- await sleep(delay);
418
- return await handleChatCompletions(newToken, requestBody, stream, retryCount + 1);
419
- }
420
 
421
- // 如果是 429 或 500 错误,进行指数退避重试
422
- if ((response.status === 429 || response.status >= 500) && retryCount < MAX_RETRIES) {
423
- const delay = getExponentialBackoffDelay(retryCount);
424
- console.warn(`Chat completion failed with status ${response.status}. Retrying in ${delay}ms... (${retryCount + 1}/${MAX_RETRIES})`);
425
- await sleep(delay);
426
- return handleChatCompletions(token, requestBody, stream, retryCount + 1); // 使用当前token重试,如果失败会在上面的逻辑中刷新
427
  }
428
-
429
- if (!response.ok) {
430
- throw new Error(`Chat completion failed: ${response.status}`);
431
  }
 
432
 
433
- // 请求成功后获取新token并存储 (后台刷新)
434
- getAnonymousToken().catch(err => console.error("Background token refresh failed:", err));
435
-
436
- return response;
437
 
438
- } catch (error) {
439
- console.error("Error during chat completion fetch:", error);
440
- // 如果是网络错误或其他非HTTP错误,也进行指数退避重试
441
- if (retryCount < MAX_RETRIES) {
442
- const delay = getExponentialBackoffDelay(retryCount);
443
- console.warn(`Chat completion network error. Retrying in ${delay}ms... (${retryCount + 1}/${MAX_RETRIES})`);
444
- await sleep(delay);
445
- return handleChatCompletions(token, requestBody, stream, retryCount + 1);
446
- }
447
- throw error; // 达到最大重试次数后抛出错误
448
  }
449
- }
450
-
451
- // 转换流式响应
452
- async function* transformStreamResponse(
453
- readableStream: ReadableStream<Uint8Array>,
454
- ) {
455
- const reader = readableStream.getReader();
456
- const decoder = new TextDecoder();
457
- let buffer = "";
458
 
459
- // 用于存储所有引用,以便在结束时生成参考资料部分
460
- const references: Reference[] = [];
461
- let referencesEmitted = false; // 标记是否已经发送过参考资料部分
462
-
463
- try {
464
- while (true) {
465
- const { done, value } = await reader.read();
466
- if (done) {
467
- // 如果有引用但尚未发送,在结束前发送参考资料部分
468
- if (references.length > 0 && !referencesEmitted) {
469
- const referencesSection = generateReferencesSection(references);
470
- yield `data: ${JSON.stringify({
471
- id: `chatcmpl-${Date.now()}`, // 生成一个唯一的ID
472
- object: "chat.completion.chunk",
473
- created: Math.floor(Date.now() / 1000),
474
- model: "sophnet-model", // 可以使用 SophNet 返回的模型名,或者固定一个
475
- choices: [
476
- {
477
- index: 0,
478
- delta: {
479
- content: `\n\n${referencesSection}`,
480
- },
481
- finish_reason: null, // 在发送参考资料时,finish_reason通常为null
482
- },
483
- ],
484
- })}\n\n`;
485
- referencesEmitted = true;
486
- }
487
- break;
488
- }
489
-
490
- buffer += decoder.decode(value, { stream: true });
491
- const lines = buffer.split("\n");
492
- buffer = lines.pop() || "";
493
-
494
- for (const line of lines) {
495
- if (line.trim() === "" || !line.startsWith("data:")) continue;
496
-
497
- const data = line.substring(5).trim();
498
- if (data === "[DONE]") {
499
- // 如果有引用但尚未发送,在结束前发送参考资料部分
500
- if (references.length > 0 && !referencesEmitted) {
501
- const referencesSection = generateReferencesSection(references);
502
- yield `data: ${JSON.stringify({
503
- id: `chatcmpl-${Date.now()}`, // 生成一个唯一的ID
504
- object: "chat.completion.chunk",
505
- created: Math.floor(Date.now() / 1000),
506
- model: "sophnet-model", // 可以使用 SophNet 返回的模型名,或者固定一个
507
- choices: [
508
- {
509
- index: 0,
510
- delta: {
511
- content: `\n\n${referencesSection}`,
512
- },
513
- finish_reason: null, // 在发送参考资料时,finish_reason通常为null
514
- },
515
- ],
516
- })}\n\n`;
517
- referencesEmitted = true;
518
- }
519
-
520
- yield "data: [DONE]\n\n";
521
- continue;
522
- }
523
-
524
- try {
525
- const sophNetEvent = JSON.parse(data);
526
-
527
- // 检查是否包含引用
528
- if (sophNetEvent.choices?.[0]?.refs && sophNetEvent.choices[0].refs.length > 0) {
529
- // 处理引用
530
- for (const ref of sophNetEvent.choices[0].refs) {
531
- // 检查是否已经存在相同URL的引用
532
- const existingRefIndex = references.findIndex(r => r.url === ref.url);
533
- if (existingRefIndex === -1) {
534
- // 添加新引用
535
- references.push(ref);
536
-
537
- // 生成引用标记,使用上标数字
538
- const refIndex = references.length;
539
- const superscriptIndex = `⁽${convertToSuperscript(refIndex)}⁾`;
540
-
541
- // 创建带引用标记的事件
542
- yield `data: ${JSON.stringify({
543
- id: sophNetEvent.id || `chatcmpl-${Date.now()}`, // 使用SophNet ID或生成新ID
544
  object: "chat.completion.chunk",
545
  created: Math.floor(Date.now() / 1000),
546
- model: sophNetEvent.model || "sophnet-model", // 使用SophNet模型名或固定
547
- choices: [
548
- {
549
- index: 0,
550
- delta: {
551
- // SophNet的引用信息通常不在content中,我们需要手动添加
552
- content: `[${superscriptIndex}](${ref.url})`,
553
- },
554
- finish_reason: null, // 引用事件通常没有finish_reason
555
- },
556
- ],
557
- })}\n\n`;
558
  }
559
  }
560
- } else {
561
- // 转换为OpenAI格式的事件
562
- const openAIEvent = {
563
- id: sophNetEvent.id || `chatcmpl-${Date.now()}`, // 使用SophNet ID或生成新ID
564
- object: "chat.completion.chunk",
565
- created: Math.floor(Date.now() / 1000),
566
- model: sophNetEvent.model || "sophnet-model", // 使用SophNet模型名或固定
567
- choices: [
568
- {
569
- index: 0,
570
- delta: {
571
- // OpenAI通常将内容放在delta.content
572
- reasoning_content: sophNetEvent.choices?.[0]?.delta?.reasoning_content || "",
573
- content: sophNetEvent.choices?.[0]?.delta?.content || "",
574
- },
575
- finish_reason: sophNetEvent.choices?.[0]?.finish_reason || null,
576
- },
577
- ],
578
- };
579
-
580
- yield `data: ${JSON.stringify(openAIEvent)}\n\n`;
581
  }
582
- } catch (e) {
583
- console.error("Error parsing event:", e, "Line:", line);
584
- // 可以选择在这里发送一个错误事件给客户端
585
  }
586
  }
587
  }
588
- } finally {
589
- reader.releaseLock();
590
- }
591
- }
592
-
593
- // 生成参考资料部分
594
- function generateReferencesSection(references: Reference[]): string {
595
- if (references.length === 0) return "";
596
-
597
- let section = "## 参考资料\n\n";
598
- references.forEach((ref, index) => {
599
- section += `${index + 1}. [${ref.title}](${ref.url})\n`;
600
  });
601
 
602
- return section;
603
- }
604
-
605
- // 转换非流式响应
606
- async function transformNonStreamResponse(response: Response) {
607
- const sophNetResponse = await response.json();
608
-
609
- // 处理引用
610
- let content = sophNetResponse.choices?.[0]?.message?.content || "";
611
- const references: Reference[] = [];
612
-
613
- // 收集所有引用
614
- if (sophNetResponse.choices?.[0]?.message?.refs && sophNetResponse.choices[0].message.refs.length > 0) {
615
- for (const ref of sophNetResponse.choices[0].message.refs) {
616
- references.push(ref);
617
- }
618
-
619
- // 为每个引用添加上标标记
620
- references.forEach((ref, index) => {
621
- const refIndex = index + 1;
622
- const superscriptIndex = `⁽${convertToSuperscript(refIndex)}⁾`;
623
- // 在内容末尾添加引用标记
624
- content += ` [${superscriptIndex}](${ref.url})`;
625
- });
626
-
627
- // 添加参考资料部分
628
- if (references.length > 0) {
629
- content += "\n\n" + generateReferencesSection(references);
630
  }
631
- }
632
-
633
- return {
634
- id: sophNetResponse.id || `chatcmpl-${Date.now()}`, // 使用SophNet ID或生成新ID
635
- object: "chat.completion",
636
- created: Math.floor(Date.now() / 1000),
637
- model: sophNetResponse.model || "sophnet-model", // 使用SophNet模型名或固定
638
- choices: [
639
- {
640
- index: 0,
641
- message: {
642
- role: "assistant",
643
- reasoning_content: sophNetResponse.choices?.[0]?.message?.reasoning_content || "",
644
- content: content,
645
- },
646
- finish_reason: sophNetResponse.choices?.[0]?.finish_reason || "stop",
647
- },
648
- ],
649
- usage: sophNetResponse.usage || {
650
- prompt_tokens: 0,
651
- completion_tokens: 0,
652
- total_tokens: 0,
653
- },
654
- };
655
  }
656
 
657
- // 主处理函数
658
- async function handler(req: Request): Promise<Response> {
659
- const url = new URL(req.url);
660
  const path = url.pathname;
661
 
662
- // CORS预检请求处理
663
- if (req.method === "OPTIONS") {
664
  return new Response(null, {
665
- status: 204,
666
  headers: {
667
  "Access-Control-Allow-Origin": "*",
668
  "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
669
  "Access-Control-Allow-Headers": "Content-Type, Authorization",
670
- "Access-Control-Max-Age": "86400",
671
  },
672
  });
673
  }
674
 
675
- // 获取有效token
676
- let token;
677
- try {
678
- token = await getValidToken();
679
- } catch (error) {
680
- console.error("Failed to get token in handler:", error);
681
- return new Response(
682
- JSON.stringify({ error: "Failed to get token", details: error.message }),
683
- {
684
- status: 500,
685
- headers: {
686
- "Content-Type": "application/json",
687
- "Access-Control-Allow-Origin": "*",
688
- },
689
- },
690
- );
691
- }
692
 
693
  try {
694
  // Models list endpoint
695
- if (path === "/v1/models" && req.method === "GET") {
696
- const models = await getModels(token);
697
  const openAIModels = transformModelsToOpenAIFormat(models);
698
-
699
- return new Response(JSON.stringify(openAIModels), {
700
- status: 200,
701
- headers: {
702
- "Content-Type": "application/json",
703
- "Access-Control-Allow-Origin": "*",
704
- },
705
- });
706
  }
707
-
708
  // Chat completions endpoint
709
- else if (path === "/v1/chat/completions" && req.method === "POST") {
710
- const requestBody = await req.json();
711
- const stream = requestBody.stream === true;
712
-
713
- const sophNetResponse = await handleChatCompletions(token, requestBody, stream);
714
-
 
715
  if (stream) {
716
- const transformedStream = new ReadableStream({
717
- async start(controller) {
718
- try {
719
- for await (const chunk of transformStreamResponse(sophNetResponse.body!)) {
720
- controller.enqueue(new TextEncoder().encode(chunk));
721
- }
722
- controller.close();
723
- } catch (error) {
724
- console.error("Stream transformation error:", error);
725
- // 在流中发送错误信息
726
- const errorData = JSON.stringify({
727
- error: {
728
- message: `Stream processing error: ${error.message}`,
729
- type: "stream_error",
730
- code: null,
731
- }
732
- });
733
- controller.enqueue(new TextEncoder().encode(`data: ${errorData}\n\n`));
734
- controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"));
735
- controller.close();
736
- }
737
- },
738
- });
739
-
740
- return new Response(transformedStream, {
741
- headers: {
742
- "Content-Type": "text/event-stream",
743
- "Cache-Control": "no-cache",
744
- "Connection": "keep-alive",
745
- "Access-Control-Allow-Origin": "*",
746
- },
747
- });
748
- } else {
749
- const transformedResponse = await transformNonStreamResponse(sophNetResponse);
750
-
751
- return new Response(JSON.stringify(transformedResponse), {
752
- status: 200,
753
- headers: {
754
- "Content-Type": "application/json",
755
- "Access-Control-Allow-Origin": "*",
756
- },
757
- });
758
  }
759
  }
760
-
761
- // 未找到路由
762
  else {
763
- return new Response(
764
- JSON.stringify({ error: "Not found", message: "Endpoint not supported" }),
765
- {
766
- status: 404,
767
- headers: {
768
- "Content-Type": "application/json",
769
- "Access-Control-Allow-Origin": "*",
770
- },
771
- },
772
- );
773
- }
774
- } catch (error) {
775
- console.error("Request handling error:", error);
776
-
777
- // 返回统一的错误响应格式
778
- return new Response(
779
- JSON.stringify({
780
  error: {
781
- message: error.message,
782
- type: "api_error", // 或者更具体的错误类型
783
- code: error.status || null, // 如果是HTTP错误,包含状态码
784
  }
785
- }),
786
- {
787
- status: error.status || 500, // 使用错误状态码或默认500
788
- headers: {
789
- "Content-Type": "application/json",
790
- "Access-Control-Allow-Origin": "*",
791
- },
792
- },
793
- );
794
  }
795
  }
796
 
797
  // Start the server
798
- console.log(`Starting server on port ${PORT}...`);
799
- serve(handler, { port: PORT });
1
+ // freeai_proxy.ts
2
+ import { serve } from "https://deno.land/[email protected]/http/server.ts";
3
 
4
+ const FREEAI_API_BASE = "https://freeaichatplayground.com/api/v1";
5
+ const DEFAULT_MODEL = "Deepseek R1";
6
 
7
+ // Fetch the list of available models
8
+ async function fetchModels() {
9
  try {
10
+ const response = await fetch(`${FREEAI_API_BASE}/models`, {
11
+ method: "POST",
12
  headers: {
13
+ "Content-Type": "application/json",
14
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:137.0) Gecko/20100101 Firefox/137.0",
15
+ "Origin": "https://freeaichatplayground.com",
16
+ "Referer": "https://freeaichatplayground.com/chat",
17
  },
18
+ body: JSON.stringify({ type: "text" }),
19
  });
20
 
21
  if (!response.ok) {
22
+ throw new Error(`Failed to fetch models: ${response.status}`);
23
  }
24
 
25
+ const models = await response.json();
26
+ return models;
 
27
  } catch (error) {
28
+ console.error("Error fetching models:", error);
29
+ return [];
30
  }
31
  }
32
 
33
+ // Transform the model list into OpenAI format
34
+ function transformModelsToOpenAIFormat(models) {
35
+ return {
36
+ object: "list",
37
+ data: models.map(model => ({
38
+ id: model.name,
39
+ object: "model",
40
+ created: new Date(model.createdAt).getTime() / 1000,
41
+ owned_by: model.provider,
42
+ permission: [],
43
+ root: model.name,
44
+ parent: null,
45
+ })),
46
+ };
47
  }
48
 
49
+ // Parse an SSE-formatted response
50
+ async function parseSSEResponse(response) {
51
+ const reader = response.body.getReader();
52
+ let content = "";
53
+ let id = `chatcmpl-${Date.now()}`;
54
+ let finishReason = "stop";
55
+
56
  try {
57
+ while (true) {
58
+ const { done, value } = await reader.read();
59
+ if (done) break;
60
+
61
+ const chunk = new TextDecoder().decode(value);
62
+ content += chunk;
63
  }
64
 
65
+ // Parse all SSE messages
66
+ const messages = content.split('\n\n')
67
+ .filter(msg => msg.trim().startsWith('data:'))
68
+ .map(msg => {
69
+ const jsonStr = msg.replace('data:', '').trim();
70
+ try {
71
+ return JSON.parse(jsonStr);
72
+ } catch (e) {
73
+ console.warn("Failed to parse SSE message:", jsonStr);
74
+ return null;
75
+ }
76
+ })
77
+ .filter(Boolean);
78
+
79
+ // Find the last complete message
80
+ const lastCompleteMessage = messages.findLast(msg =>
81
+ msg.choices && msg.choices[0] && msg.choices[0].message && msg.choices[0].message.content
82
+ );
83
+
84
+ if (lastCompleteMessage) {
85
+ id = lastCompleteMessage.id || id;
86
+ if (lastCompleteMessage.choices &&
87
+ lastCompleteMessage.choices[0] &&
88
+ lastCompleteMessage.choices[0].finish_reason) {
89
+ finishReason = lastCompleteMessage.choices[0].finish_reason;
90
+ }
91
+
92
+ return {
93
+ id,
94
+ content: lastCompleteMessage.choices[0].message.content,
95
+ finish_reason: finishReason,
96
+ usage: lastCompleteMessage.usage || null
97
+ };
98
  }
99
+
100
+ // If no complete message was found, try to extract content from all messages
101
+ let combinedContent = "";
102
+ for (const msg of messages) {
103
+ if (msg.choices && msg.choices[0] && msg.choices[0].delta && msg.choices[0].delta.content) {
104
+ combinedContent += msg.choices[0].delta.content;
105
+ } else if (msg.choices && msg.choices[0] && msg.choices[0].message && msg.choices[0].message.content) {
106
+ combinedContent += msg.choices[0].message.content;
107
+ }
108
  }
109
+
110
+ return {
111
+ id,
112
+ content: combinedContent || "No content found in response",
113
+ finish_reason: finishReason,
114
+ usage: null
115
+ };
116
+
117
  } catch (error) {
118
+ console.error("Error parsing SSE response:", error);
119
+ return {
120
+ id,
121
+ content: "Error parsing response: " + error.message,
122
+ finish_reason: "error",
123
+ usage: null
124
+ };
125
  }
126
  }
127
 
128
+ // Send a chat request to freeaichatplayground
129
+ async function sendChatRequest(modelName, messages) {
130
+ try {
131
+ const formattedMessages = messages.map((msg, index) => ({
132
+ id: `${Date.now() + index}`,
133
+ role: msg.role,
134
+ content: msg.content,
135
+ model: {
136
+ id: "", // This ID is filled in below
137
+ name: modelName,
138
+ icon: "",
139
+ provider: "",
140
+ contextWindow: 63920
141
+ }
142
+ }));
143
 
144
+ // Fetch the model list to find the correct model ID
145
+ const models = await fetchModels();
146
+ const selectedModel = models.find(m => m.name === modelName);
147
+
148
+ if (!selectedModel) {
149
+ throw new Error(`Model "${modelName}" not found`);
150
+ }
151
+
152
+ // Fill in the model info
153
+ formattedMessages.forEach(msg => {
154
+ if (msg.model) {
155
+ msg.model.id = selectedModel.id;
156
+ msg.model.icon = selectedModel.icon;
157
+ msg.model.provider = selectedModel.provider;
158
+ }
159
  });
160
 
161
+ const response = await fetch(`${FREEAI_API_BASE}/chat/completions`, {
162
+ method: "POST",
163
+ headers: {
164
+ "Content-Type": "application/json",
165
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:137.0) Gecko/20100101 Firefox/137.0",
166
+ "Origin": "https://freeaichatplayground.com",
167
+ "Referer": "https://freeaichatplayground.com/chat",
168
+ },
169
+ body: JSON.stringify({
170
+ model: modelName,
171
+ messages: formattedMessages,
172
+ }),
173
  });
 
174
 
175
+ if (!response.ok) {
176
+ const errorText = await response.text();
177
+ throw new Error(`Chat completion failed: ${response.status} - ${errorText}`);
178
+ }
179
 
180
+ // Parse the SSE streaming response
181
+ const parsedResponse = await parseSSEResponse(response);
182
+ return parsedResponse;
183
+ } catch (error) {
184
+ console.error("Error in chat completion:", error);
185
+ throw error;
186
  }
187
  }
188
 
189
+ // Transform the chat response into OpenAI format
190
+ function transformChatResponseToOpenAIFormat(response, modelName) {
191
+ return {
192
+ id: response.id || `chatcmpl-${Date.now()}`,
193
+ object: "chat.completion",
194
+ created: Math.floor(Date.now() / 1000),
195
+ model: modelName,
196
+ choices: [
197
  {
198
+ index: 0,
199
+ message: {
200
+ role: "assistant",
201
+ content: response.content,
202
  },
203
+ finish_reason: response.finish_reason || "stop",
204
  },
205
+ ],
206
+ usage: response.usage || {
207
+ prompt_tokens: 0,
208
+ completion_tokens: 0,
209
+ total_tokens: 0,
210
+ },
211
+ };
212
+ }
213
 
214
+ // Handle streaming-response requests
215
+ async function handleStreamRequest(request, modelName, messages) {
216
+ const encoder = new TextEncoder();
217
+ const formattedMessages = messages.map((msg, index) => ({
218
+ id: `${Date.now() + index}`,
219
+ role: msg.role,
220
+ content: msg.content,
221
+ model: {
222
+ id: "", // This ID is filled in below
223
+ name: modelName,
224
+ icon: "",
225
+ provider: "",
226
+ contextWindow: 63920
227
  }
228
+ }));
229
+
230
+ // Fetch the model list to find the correct model ID
231
+ const models = await fetchModels();
232
+ const selectedModel = models.find(m => m.name === modelName);
233
+
234
+ if (!selectedModel) {
235
+ throw new Error(`Model "${modelName}" not found`);
236
+ }
237
+
238
+ // Fill in the model info
239
+ formattedMessages.forEach(msg => {
240
+ if (msg.model) {
241
+ msg.model.id = selectedModel.id;
242
+ msg.model.icon = selectedModel.icon;
243
+ msg.model.provider = selectedModel.provider;
244
  }
245
+ });
246
 
247
+ const response = await fetch(`${FREEAI_API_BASE}/chat/completions`, {
248
+ method: "POST",
249
+ headers: {
250
+ "Content-Type": "application/json",
251
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:137.0) Gecko/20100101 Firefox/137.0",
252
+ "Origin": "https://freeaichatplayground.com",
253
+ "Referer": "https://freeaichatplayground.com/chat",
254
+ },
255
+ body: JSON.stringify({
256
+ model: modelName,
257
+ messages: formattedMessages,
258
+ stream: true,
259
+ }),
260
+ });
261
 
262
+ if (!response.ok) {
263
+ const errorText = await response.text();
264
+ throw new Error(`Chat completion failed: ${response.status} - ${errorText}`);
265
  }
266
 
267
+ const stream = new ReadableStream({
268
+ async start(controller) {
269
+ const reader = response.body.getReader();
270
+ const chatId = `chatcmpl-${Date.now()}`;
271
+
272
+ // Send the initial message
273
+ const initialChunk = {
274
+ id: chatId,
275
+ object: "chat.completion.chunk",
276
+ created: Math.floor(Date.now() / 1000),
277
+ model: modelName,
278
+ choices: [{
279
+ index: 0,
280
+ delta: { role: "assistant" },
281
+ finish_reason: null
282
+ }]
283
+ };
284
+ controller.enqueue(encoder.encode(`data: ${JSON.stringify(initialChunk)}\n\n`));
285
+
286
+ try {
287
+ let buffer = "";
288
+
289
+ while (true) {
290
+ const { done, value } = await reader.read();
291
+ if (done) break;
292
+
293
+ const chunk = new TextDecoder().decode(value);
294
+ buffer += chunk;
295
+
296
+ // Process all complete SSE messages in the buffer
297
+ const messages = buffer.split('\n\n');
298
+ buffer = messages.pop() || ""; // Keep the last, possibly incomplete, message
299
+
300
+ for (const msg of messages) {
301
+ if (!msg.trim().startsWith('data:')) continue;
302
+
303
+ try {
304
+ const jsonStr = msg.replace('data:', '').trim();
305
+ const data = JSON.parse(jsonStr);
306
+
307
+ if (data.choices && data.choices[0]) {
308
+ // Convert to OpenAI streaming format
309
+ const openAIChunk = {
310
+ id: chatId,
311
  object: "chat.completion.chunk",
312
  created: Math.floor(Date.now() / 1000),
313
+ model: modelName,
314
+ choices: [{
315
+ index: 0,
316
+ delta: {},
317
+ finish_reason: data.choices[0].finish_reason || null
318
+ }]
319
+ };
320
+
321
+ // Extract the content
322
+ if (data.choices[0].delta && data.choices[0].delta.content) {
323
+ openAIChunk.choices[0].delta.content = data.choices[0].delta.content;
324
+ } else if (data.choices[0].message && data.choices[0].message.content) {
325
+ openAIChunk.choices[0].delta.content = data.choices[0].message.content;
326
+ }
327
+
328
+ controller.enqueue(encoder.encode(`data: ${JSON.stringify(openAIChunk)}\n\n`));
329
+
330
+ // If this is the final message, send [DONE]
331
+ if (data.choices[0].finish_reason) {
332
+ controller.enqueue(encoder.encode("data: [DONE]\n\n"));
333
+ }
334
  }
335
+ } catch (e) {
336
+ console.warn("Failed to parse SSE message:", msg);
337
+ continue;
338
  }
339
  }
340
  }
341
+
342
+ // Make sure the final [DONE] message is sent
343
+ controller.enqueue(encoder.encode("data: [DONE]\n\n"));
344
+ controller.close();
345
+ } catch (error) {
346
+ console.error("Stream processing error:", error);
347
+ controller.error(error);
348
  }
349
  }
350
  });
351
 
352
+ return new Response(stream, {
353
+ headers: {
354
+ "Content-Type": "text/event-stream",
355
+ "Cache-Control": "no-cache",
356
+ "Connection": "keep-alive",
357
+ "Access-Control-Allow-Origin": "*",
358
  }
359
+ });
360
  }
361
 
362
+ // Handle incoming requests
363
+ async function handleRequest(request) {
364
+ const url = new URL(request.url);
365
  const path = url.pathname;
366
 
367
+ // Handle CORS preflight requests
368
+ if (request.method === "OPTIONS") {
369
  return new Response(null, {
370
+ status: 200,
371
  headers: {
372
  "Access-Control-Allow-Origin": "*",
373
  "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
374
  "Access-Control-Allow-Headers": "Content-Type, Authorization",
 
375
  },
376
  });
377
  }
378
 
379
+ // Set common response headers
380
+ const headers = {
381
+ "Content-Type": "application/json",
382
+ "Access-Control-Allow-Origin": "*",
383
+ };
384
 
385
  try {
386
  // Models list endpoint
387
+ if (path === "/v1/models" && request.method === "GET") {
388
+ const models = await fetchModels();
389
  const openAIModels = transformModelsToOpenAIFormat(models);
390
+ return new Response(JSON.stringify(openAIModels), { headers });
391
  }
392
+
393
  // Chat completions endpoint
394
+ else if (path === "/v1/chat/completions" && request.method === "POST") {
395
+ const requestData = await request.json();
396
+ const modelName = requestData.model || DEFAULT_MODEL;
397
+ const messages = requestData.messages || [];
398
+ const stream = requestData.stream || false;
399
+
400
+ // Handle streaming responses
401
  if (stream) {
402
+ return handleStreamRequest(request, modelName, messages);
403
  }
404
+
405
+ // Handle non-streaming responses
406
+ const chatResponse = await sendChatRequest(modelName, messages);
407
+ const openAIResponse = transformChatResponseToOpenAIFormat(chatResponse, modelName);
408
+
409
+ return new Response(JSON.stringify(openAIResponse), { headers });
410
  }
411
+
412
+ // Unknown path
413
  else {
414
+ return new Response(JSON.stringify({
415
  error: {
416
+ message: "Not found",
417
+ type: "invalid_request_error",
418
+ code: "path_not_found",
419
  }
420
+ }), { status: 404, headers });
421
+ }
422
+ } catch (error) {
423
+ console.error("Error handling request:", error);
424
+ return new Response(JSON.stringify({
425
+ error: {
426
+ message: error.message,
427
+ type: "server_error",
428
+ code: "internal_server_error",
429
+ }
430
+ }), { status: 500, headers });
431
  }
432
  }
433
 
434
  // Start the server
435
+ const port = parseInt(Deno.env.get("PORT") || "7860");
436
+ console.log(`Starting server on port ${port}...`);
437
+ serve(handleRequest, { port });
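
Usage note: the rewritten proxy keeps the same OpenAI-compatible surface as the SophNet version it replaces, so existing clients only need to point at the new base URL. The snippet below is a minimal client-side sketch under stated assumptions, not part of the commit: it assumes the server above is running locally on the default port 7860 (PORT unset), that the upstream still lists the "Deepseek R1" model named in DEFAULT_MODEL, and the file name smoke_test.ts is hypothetical.

// smoke_test.ts (hypothetical): exercise the proxy above; run with: deno run --allow-net smoke_test.ts
const BASE = "http://localhost:7860"; // assumed local deployment; adjust to your own URL

// List the models the proxy exposes (GET /v1/models).
const modelsRes = await fetch(`${BASE}/v1/models`);
const models = await modelsRes.json();
console.log("models:", models.data?.map((m: { id: string }) => m.id));

// Non-streaming chat completion (POST /v1/chat/completions without stream).
const chatRes = await fetch(`${BASE}/v1/chat/completions`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: "Deepseek R1", // DEFAULT_MODEL in main.ts
    messages: [{ role: "user", content: "Say hello in one short sentence." }],
  }),
});
const chat = await chatRes.json();
console.log("assistant:", chat.choices?.[0]?.message?.content);

Streaming works the same way with stream: true in the request body; the proxy then returns a text/event-stream of chat.completion.chunk objects terminated by data: [DONE].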