hongshi-files committed
Commit 720fe41 · verified · 1 Parent(s): 430c991

Update main.ts

Files changed (1):
  1. main.ts +411 -334
main.ts CHANGED
@@ -1,352 +1,429 @@
- import { serve } from "https://deno.land/std@0.208.0/http/server.ts";
-
- // Julep API Base URL (fixed)
- const JULEP_API_BASE = "https://api.julep.ai/api";
-
- // Hardcoded list of models (Agent IDs in this context)
- const HARDCODED_MODELS = [
-   'mistral-large-2411', 'o1', 'text-embedding-3-large', 'vertex_ai/text-embedding-004',
-   'claude-3.5-haiku', 'cerebras/llama-4-scout-17b-16e-instruct', 'llama-3.1-8b',
-   'magnum-v4-72b', 'voyage-multilingual-2', 'claude-3-haiku', 'gpt-4o',
-   'BAAI/bge-m3', 'openrouter/meta-llama/llama-4-maverick', 'openrouter/meta-llama/llama-4-scout',
-   'claude-3.5-sonnet', 'hermes-3-llama-3.1-70b', 'claude-3.5-sonnet-20240620',
-   'qwen-2.5-72b-instruct', 'l3.3-euryale-70b', 'gpt-4o-mini', 'cerebras/llama-3.3-70b',
-   'o1-preview', 'gemini-1.5-pro-latest', 'l3.1-euryale-70b', 'claude-3-sonnet',
-   'Alibaba-NLP/gte-large-en-v1.5', 'openrouter/meta-llama/llama-4-scout:free',
-   'llama-3.1-70b', 'eva-qwen-2.5-72b', 'claude-3.5-sonnet-20241022', 'gemini-2.0-flash',
-   'deepseek-chat', 'o1-mini', 'eva-llama-3.33-70b', 'gemini-2.5-pro-preview-03-25',
-   'gemini-1.5-pro', 'gpt-4-turbo', 'openrouter/meta-llama/llama-4-maverick:free',
-   'o3-mini', 'claude-3.7-sonnet', 'voyage-3', 'cerebras/llama-3.1-8b', 'claude-3-opus'
- ];
-
- // Helper function to get Julep API Key from Authorization header
- function getJulepApiKey(req: Request): string | null {
-   const authHeader = req.headers.get("Authorization");
-   if (authHeader && authHeader.startsWith("Bearer ")) {
-     return authHeader.substring(7); // Extract the token after "Bearer "
-   }
-   return null;
- }
-
- // OpenAI Models endpoint handler (hardcoded)
- async function handleModels(req: Request): Promise<Response> {
-   const julepApiKey = getJulepApiKey(req);
-   if (!julepApiKey) {
-     return new Response("Unauthorized: Missing or invalid Authorization header", { status: 401 });
-   }
-
-   // Format hardcoded models into OpenAI models format
-   const openaiModels = HARDCODED_MODELS.map((modelId) => ({
-     id: modelId,
-     object: "model",
-     created: Math.floor(Date.now() / 1000), // Use current time for creation
-     owned_by: "julep", // Or "openai" if you prefer
-     permission: [
-       {
-         id: `modelperm-${modelId}`,
-         object: "model_permission",
-         created: Math.floor(Date.now() / 1000),
-         allow_create_engine: false,
-         allow_sampling: true,
-         allow_logprobs: true,
-         allow_search_indices: false,
-         allow_view: true,
-         allow_fine_tuning: false,
-         organization: "*",
-         group: null,
-         is_blocking: false,
-       },
-     ],
-     root: modelId,
-     parent: null,
-   }));
-
-   return new Response(JSON.stringify({ data: openaiModels, object: "list" }), {
-     headers: { "Content-Type": "application/json" },
-     status: 200,
-   });
- }
-
- // OpenAI Chat Completions endpoint handler
- async function handleChatCompletions(req: Request): Promise<Response> {
-   const julepApiKey = getJulepApiKey(req);
-   if (!julepApiKey) {
-     return new Response("Unauthorized: Missing or invalid Authorization header", { status: 401 });
-   }
-
-   const headers = {
-     "Authorization": `Bearer ${julepApiKey}`,
-     "Content-Type": "application/json",
-   };
-
-   let agentId: string | null = null; // Variable to store the created agent ID
-   let sessionId: string | null = null; // Variable to store the created session ID
-
-   try {
-     const requestBody = await req.json();
-     const { model, messages, stream, ...rest } = requestBody;
-
-     if (!model || !messages || !Array.isArray(messages) || messages.length === 0) {
-       return new Response("Invalid request body. 'model' and 'messages' are required.", { status: 400 });
    }
-
-     // Check if the requested model is in our hardcoded list
-     if (!HARDCODED_MODELS.includes(model)) {
-       return new Response(`Invalid model: ${model}. Please use one of the available models.`, { status: 400 });
    }
-
-     // 1. Create a new Agent for this request
-     const createAgentUrl = `${JULEP_API_BASE}/agents`;
-     const createAgentBody = {
-       name: model, // Set agent name to the model value
-       model: model, // Use the requested OpenAI model as the Julep Agent's model
-       about: model, // Set agent about to the model value
-       instructions: ["Follow user instructions carefully."], // Keep some default instructions
-     };
-
-     const createAgentResponse = await fetch(createAgentUrl, {
-       method: "POST",
-       headers,
-       body: JSON.stringify(createAgentBody),
-     });
-
-     if (!createAgentResponse.ok) {
-       const errorText = await createAgentResponse.text();
-       console.error(`Error creating Julep Agent: ${createAgentResponse.status} - ${errorText}`);
-       return new Response(`Error creating Julep Agent: ${createAgentResponse.statusText}`, { status: createAgentResponse.status });
    }

-     const agentData = await createAgentResponse.json();
-     agentId = agentData.id; // Store the agent ID

-     // 2. Create a Session using the new Agent ID
-     const createSessionUrl = `${JULEP_API_BASE}/sessions`;
-     const createSessionBody = {
-       agent: agentId, // Use the newly created Agent ID
-       // You can add other Session creation parameters here if needed
    };
-
-     const createSessionResponse = await fetch(createSessionUrl, {
-       method: "POST",
-       headers,
-       body: JSON.stringify(createSessionBody),
-     });
-
-     if (!createSessionResponse.ok) {
-       const errorText = await createSessionResponse.text();
-       console.error(`Error creating Julep Session: ${createSessionResponse.status} - ${errorText}`);
-       // Attempt to clean up the temporary agent
-       if (agentId) {
-         fetch(`${JULEP_API_BASE}/agents/${agentId}`, { method: "DELETE", headers }).catch(console.error);
-       }
-       return new Response(`Error creating Julep Session: ${createSessionResponse.statusText}`, { status: createSessionResponse.status });
    }

-     const sessionData = await createSessionResponse.json();
-     sessionId = sessionData.id; // Store the session ID
-
-     // 3. Perform Chat Completion
-     const chatUrl = `${JULEP_API_BASE}/sessions/${sessionId}/chat`;
-     const chatBody = {
-       messages: messages.map((msg: any) => ({
-         role: msg.role,
-         content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content), // Handle potential object content
-         // Map other relevant fields if necessary
-       })),
-       stream: stream === true,
-       ...rest, // Forward any other parameters from the OpenAI request
-     };

-     const chatResponse = await fetch(chatUrl, {
-       method: "POST",
-       headers,
-       body: JSON.stringify(chatBody),
-     });
-
-     // 4. Handle Response and Clean Up
-     if (!chatResponse.ok) {
-       // If the chat request itself fails, read the error body and then clean up
-       const errorText = await chatResponse.text();
-       console.error(`Error during Julep Chat Completion: ${chatResponse.status} - ${errorText}`);
-       // Attempt to clean up the temporary agent and session
-       if (sessionId) {
-         fetch(`${JULEP_API_BASE}/sessions/${sessionId}`, { method: "DELETE", headers }).catch(console.error);
-       }
-       if (agentId) {
-         fetch(`${JULEP_API_BASE}/agents/${agentId}`, { method: "DELETE", headers }).catch(console.error);
-       }
-       return new Response(`Error during Julep Chat Completion: ${chatResponse.statusText} - ${errorText}`, { status: chatResponse.status });
    }

-     if (stream) {
-       // Handle streaming response (Server-Sent Events)
-       // Pipe the Julep response body directly to the client response body
-       // and add cleanup to the end of the stream.
-       const readableStream = chatResponse.body!.pipeThrough(new TextDecoderStream()).pipeThrough(new TransformStream({
-         transform(chunk, controller) {
-           // Parse Julep streaming chunks and format as OpenAI SSE
-           const lines = chunk.split('\n').filter(line => line.trim() !== '');
-           for (const line of lines) {
-             if (line.startsWith('data:')) {
-               const data = JSON.parse(line.substring(5).trim());
-               // Format the Julep chunk data into OpenAI SSE format
-               const openaiChunk = {
-                 id: data.id,
-                 object: "chat.completion.chunk",
-                 created: Math.floor(new Date(data.created_at).getTime() / 1000),
-                 model: model, // Use the requested model ID
-                 choices: data.choices.map((choice: any) => ({
-                   index: choice.index,
-                   delta: {
-                     role: choice.delta.role,
-                     content: choice.delta.content,
-                     tool_calls: choice.delta.tool_calls ? toolCallDeltaToOpenAI(choice.delta.tool_calls) : undefined,
-                   },
-                   finish_reason: choice.finish_reason,
-                 })),
-               };
-               controller.enqueue(`data: ${JSON.stringify(openaiChunk)}\n\n`);
-             } else {
-               // Pass through non-data lines like comments or empty lines if needed
-               controller.enqueue(`${line}\n`);
            }
-           }
-         },
-       }));
-
-       // Attach cleanup to the end of the stream
-       // We need to duplicate the stream to be able to pipe it to the client response
-       // AND to a WritableStream for cleanup.
-       const [stream1, stream2] = readableStream.tee();
-
-       const cleanupPromise = new Promise<void>((resolve, reject) => {
-         stream2.pipeTo(new WritableStream({
-           close: () => {
-             if (sessionId) {
-               fetch(`${JULEP_API_BASE}/sessions/${sessionId}`, { method: "DELETE", headers }).catch(console.error);
-             }
-             if (agentId) {
-               fetch(`${JULEP_API_BASE}/agents/${agentId}`, { method: "DELETE", headers }).catch(console.error);
-             }
-             resolve();
-           },
-           abort: (reason) => {
-             console.error("Stream aborted:", reason);
-             if (sessionId) {
-               fetch(`${JULEP_API_BASE}/sessions/${sessionId}`, { method: "DELETE", headers }).catch(console.error);
-             }
-             if (agentId) {
-               fetch(`${JULEP_API_BASE}/agents/${agentId}`, { method: "DELETE", headers }).catch(console.error);
-             }
-             reject(reason);
-           }
-         })).catch(reject);
-       });
-
-       // Return the response with the first stream.
-       return new Response(stream1, {
-         headers: {
-           "Content-Type": "text/event-stream",
-           "Cache-Control": "no-cache",
-           "Connection": "keep-alive",
-         },
-         status: 200,
-       });
-
-     } else {
-       // Handle non-streaming response
-       const julepChatData = await chatResponse.json();
-
-       const openaiCompletion = {
-         id: julepChatData.id,
-         object: "chat.completion",
-         created: Math.floor(new Date(julepChatData.created_at).getTime() / 1000),
-         model: model, // Use the requested model ID
-         choices: julepChatData.choices.map((choice: any) => ({
-           index: choice.index,
-           message: {
-             role: choice.message.role,
-             content: choice.message.content,
-             tool_calls: choice.message.tool_calls ? toolCallMessageToOpenAI(choice.message.tool_calls) : undefined,
-           },
-           finish_reason: choice.finish_reason,
-         })),
-         usage: julepChatData.usage ? {
-           prompt_tokens: julepChatData.usage.prompt_tokens,
-           completion_tokens: julepChatData.usage.completion_tokens,
-           total_tokens: julepChatData.usage.total_tokens,
-         } : undefined,
-       };
-
-       // Attempt to clean up the temporary agent and session (fire and forget)
-       if (sessionId) {
-         fetch(`${JULEP_API_BASE}/sessions/${sessionId}`, { method: "DELETE", headers }).catch(console.error);
-       }
-       if (agentId) {
-         fetch(`${JULEP_API_BASE}/agents/${agentId}`, { method: "DELETE", headers }).catch(console.error);
-       }
-
-       return new Response(JSON.stringify(openaiCompletion), {
-         headers: { "Content-Type": "application/json" },
-         status: 200,
-       });
    }

-   } catch (error) {
-     console.error("Error handling chat completions request:", error);
-     // Attempt to clean up in case of errors before session/agent creation
-     if (sessionId) {
-       fetch(`${JULEP_API_BASE}/sessions/${sessionId}`, { method: "DELETE", headers }).catch(console.error);
-     }
-     if (agentId) {
-       fetch(`${Julep_API_BASE}/agents/${agentId}`, { method: "DELETE", headers }).catch(console.error);
-     }
-     return new Response("Internal Server Error", { status: 500 });
-   }
- }
-
- // Helper to format Julep ToolCall delta to OpenAI format
- function toolCallDeltaToOpenAI(julepToolCalls: any[]): any[] {
-   return julepToolCalls.map(toolCall => {
-     // Assuming Julep's delta format for tool_calls is similar to the message format
-     // and contains function objects directly. Adjust if necessary.
-     return {
-       id: toolCall.id,
-       type: "function",
-       function: {
-         name: toolCall.function?.name,
-         arguments: toolCall.function?.arguments, // Arguments might be streamed as chunks
-       },
-     };
-   });
- }

- // Helper to format Julep ToolCall message to OpenAI format
- function toolCallMessageToOpenAI(julepToolCalls: any[]): any[] {
-   return julepToolCalls.map(toolCall => {
-     return {
-       id: toolCall.id,
-       type: "function",
-       function: {
-         name: toolCall.function?.name,
-         arguments: toolCall.function?.arguments, // Arguments should be complete in non-streaming
-       },
-     };
-   });
  }

- // Main request handler
- async function handler(req: Request): Promise<Response> {
-   const url = new URL(req.url);
-
-   if (url.pathname === "/v1/models" && req.method === "GET") {
-     return handleModels(req);
-   } else if (url.pathname === "/v1/chat/completions" && req.method === "POST") {
-     return handleChatCompletions(req);
-   } else {
-     return new Response("Not Found", { status: 404 });
-   }
- }

- console.log(`HTTP server running on http://localhost:8000`);
- serve(handler, { port: 7860 });
+ import { serve } from "https://deno.land/std@0.220.1/http/server.ts";
+
+ // --- Configuration constants ---
+ const AUTH_KEY = Deno.env.get("AUTH_KEY") ?? "default_api_key_value"; // Set the API key via environment variables; otherwise it defaults to default_api_key_value
+ const TARGET_URL = 'https://assistant.on.adaptive.ai/api/sendMessage';
+ const PROXY_MODEL_NAME = "gpt-4o"; // Model name reported by the proxy service
+
+
+ const TARGET_HEADERS = {
+   'content-type': 'application/json',
+   'x-channel-id': "0"
+ };
+
+ // --- Helper: build an SSE data chunk ---
+ function createSSEChunk(id: string, model: string, content: string | null, role: string | null, finish_reason: string | null): string {
+   const now = Math.floor(Date.now() / 1000);
+   const chunk: any = {
+     id: id,
+     object: "chat.completion.chunk",
+     created: now,
+     model: model,
+     choices: [
+       {
+         index: 0,
+         delta: {},
+         finish_reason: finish_reason,
+         logprobs: null,
+       }
+     ],
+     // system_fingerprint: null, // optional
+   };
+   if (role) {
+     chunk.choices[0].delta.role = role;
  }
+   if (content) {
+     chunk.choices[0].delta.content = content;
  }
+   // If delta is empty but a finish_reason is set, make sure delta stays an empty object
+   if (!role && !content && finish_reason) {
+     chunk.choices[0].delta = {};
  }

+   return `data: ${JSON.stringify(chunk)}\n\n`;
+ }

+ /**
+  * Sends a createChat request to Adaptive AI.
+  * On success, returns the ID of the new chat (a string).
+  * If anything goes wrong, the error is rethrown so the caller can respond with a 500.
+  *
+  * @returns {Promise<string>} The ID of the newly created chat.
+  */
+ async function createChatAndGetId(): Promise<string> {
+   const url = 'https://assistant.on.adaptive.ai/api/createChat';
+   // Request headers (cookie and priority omitted)
+   const headers = {
+     'content-type': 'application/json',
  };
+   // Build the request body, using the current timestamp as the request ID
+   const payload = {
+     json: {
+       jsonrpc: "2.0",
+       id: Date.now(), // dynamic ID
+       method: "createChat",
+       params: []
+     }
+   };
+   console.log("Sending createChat request...");
+   try {
+     const response = await fetch(url, {
+       method: 'POST',
+       headers: headers,
+       body: JSON.stringify(payload) // serialize the payload object to a JSON string
+     });
+     // Check whether the HTTP status code indicates success
+     if (!response.ok) {
+       let errorBody = "Unable to read response body";
+       try {
+         errorBody = await response.text(); // try to read the error response body
+       } catch (readError) {
+         console.warn("Failed to read the error response body:", readError);
+       }
+       throw new Error(`createChat request failed with HTTP status ${response.status}. Response: ${errorBody}`);
+     }
+     // Parse the JSON response body
+     let responseData: any;
+     try {
+       responseData = await response.json();
+     } catch (parseError) {
+       // If the response is not valid JSON, throw
+       console.error("Error parsing the createChat response JSON:", parseError);
+       throw new Error(`Unable to parse the response as JSON: ${parseError.message}`);
+     }
+     // Extract and validate the ID
+     // The ID is expected at responseData.json.result.id
+     // Optional chaining (?.) safely accesses the nested properties without throwing when an intermediate property is missing
+     const chatId = responseData?.json?.result?.id;
+     // Check that the extracted ID is a valid, non-empty string
+     if (typeof chatId === 'string' && chatId.length > 0) {
+       console.log(`Chat created successfully, ID: ${chatId}`);
+       return chatId; // return the extracted ID
+     } else {
+       // If the ID is missing or malformed, throw
+       console.error("Could not extract a valid chat ID from the response. Response data:", JSON.stringify(responseData));
+       throw new Error("The createChat response has an invalid format or is missing the 'json.result.id' field.");
+     }
+   } catch (error) {
+     // Catch any error thrown in the try block, or a network error from fetch itself
+     const errorMessage = error instanceof Error ? error.message : String(error);
+     console.error("Error while running createChatAndGetId:", errorMessage); // log the details
+     // Rethrow instead of returning a Response object here: the function is declared to return
+     // Promise<string>, and the caller's try/catch turns this into a proper 500 response.
+     throw new Error(`Failed to create a chat session: ${errorMessage}`);
  }

+ }

+ // --- Main handler ---
+ async function handler(req: Request): Promise<Response> {
+   const url = new URL(req.url);
+
+   // --- CORS preflight handling ---
+   if (req.method === "OPTIONS") {
+     return new Response(null, {
+       status: 204,
+       headers: {
+         "Access-Control-Allow-Origin": "*",
+         "Access-Control-Allow-Methods": "POST, OPTIONS",
+         "Access-Control-Allow-Headers": "Content-Type, Authorization",
+         "Access-Control-Max-Age": "86400",
+       },
+     });
  }

+
+   // Model list endpoint
+   if (url.pathname === "/v1/models" && req.method === "GET") {
+     return new Response(
+       JSON.stringify({
+         object: "list",
+         data: [
+           {
+             id: "gpt-4o",
+             object: "model",
+             created: 0,
+             owned_by: "unlimitedai",
+             permission: [{
+               id: "modelperm-gpt-4o",
+               object: "model_permission",
+               created: 0,
+               allow_create_engine: false,
+               allow_sampling: true,
+               allow_logprobs: false,
+               allow_search_indices: false,
+               allow_view: true,
+               allow_fine_tuning: false,
+               organization: "*",
+               group: null,
+               is_blocking: false,
+             }],
+             root: "gpt-4o",
+             parent: null,
+           },
+         ],
+       }),
+       {
+         status: 200,
+         headers: {
+           "Content-Type": "application/json",
+           "Access-Control-Allow-Origin": "*",
+         },
      }
+     );
  }

+   // --- Path and method check ---
+   if (url.pathname !== "/v1/chat/completions" || req.method !== "POST") {
+     return new Response(JSON.stringify({ error: "Not Found or Method Not Allowed" }), {
+       status: 404,
+       headers: {
+         "Content-Type": "application/json",
+         "Access-Control-Allow-Origin": "*",
+       },
+     });
+   }
+   // --- Authentication check ---
+   const authHeader = req.headers.get("Authorization");
+   let providedKey = "";
+   // Check that the Authorization header exists and is well formed (Bearer <key>)
+   if (!authHeader || !authHeader.toLowerCase().startsWith("bearer ")) {
+     console.warn("Authentication failed: missing or malformed Authorization header");
+     return new Response(JSON.stringify({
+       error: {
+         message: "Unauthorized: Missing or invalid Authorization header. Use 'Bearer <YOUR_API_KEY>' format.",
+         type: "invalid_request_error",
+         param: null,
+         code: "missing_or_invalid_header"
+       }
+     }), {
+       status: 401, // Unauthorized
+       headers: {
+         "Content-Type": "application/json",
+         "Access-Control-Allow-Origin": "*",
+         "WWW-Authenticate": 'Bearer realm="API Access"'
+       }
+     });
+   }
+   // Extract the key
+   providedKey = authHeader.substring(7); // "Bearer ".length is 7
+   console.log("providedKey:" + providedKey);
+   // Compare the provided key against the configured key
+   if (providedKey !== AUTH_KEY) {
+     console.warn("Authentication failed: invalid API key provided");
+     return new Response(JSON.stringify({
+       error: {
+         message: "Unauthorized: Invalid API Key provided.",
+         type: "invalid_request_error",
+         param: null,
+         code: "invalid_api_key"
+       }
+     }), {
+       status: 401, // Unauthorized
+       headers: {
+         "Content-Type": "application/json",
+         "Access-Control-Allow-Origin": "*",
+         "WWW-Authenticate": 'Bearer realm="API Access"'
+       }
+     });
+   }

+   // --- Handle the POST request ---
+   try {
+
+     // 1. Parse the incoming request body
+     let requestBody: any;
+     try {
+       requestBody = await req.json();
+       console.log(requestBody);
+     } catch (e) {
+       console.error("Failed to parse request JSON:", e);
+       return new Response(JSON.stringify({ error: "Invalid JSON in request body" }), {
+         status: 400, headers: { "Content-Type": "application/json", "Access-Control-Allow-Origin": "*" },
+       });
+     }
+
+     // 2. Check whether a streaming response was requested
+     const isStream = requestBody.stream === true;
+
+     // 3. Extract the user input: convert the messages array into a string
+     let userContent: string | undefined;
+     if (Array.isArray(requestBody.messages) && requestBody.messages.length > 0) {
+       try {
+         // Serialize the entire messages array to a JSON string
+         userContent = JSON.stringify(requestBody.messages);
+       } catch (e) {
+         console.error("Failed to stringify 'messages' array:", e);
+         // In case JSON.stringify fails (unlikely for an array, but just in case)
+         return new Response(JSON.stringify({ error: "Failed to process 'messages' array." }), {
+           status: 400, headers: { "Content-Type": "application/json", "Access-Control-Allow-Origin": "*" },
+         });
+       }
+     }
+     // Check that userContent was produced.
+     // If requestBody.messages is missing, not an array, empty, or serialization failed, userContent is undefined.
+     if (!userContent) {
+       console.error("Request body must contain a non-empty 'messages' array.");
+       return new Response(JSON.stringify({ error: "Request body must contain a non-empty 'messages' array." }), {
+         status: 400, headers: { "Content-Type": "application/json", "Access-Control-Allow-Origin": "*" },
+       });
+     }
+
+     // userContent now holds a string representation of the entire conversation history
+     console.log("Formatted user content:", userContent); // debug output
+
+     const CHAT_ID = await createChatAndGetId(); // get a fresh chat ID
+
+     // 4. Build the target API payload
+     const payload = {
+       json: {
+         jsonrpc: "2.0", id: Date.now(), method: "sendMessage",
+         params: [{ chatId: CHAT_ID, content: userContent, fileId: null, fileIds: [] }]
+       },
+       meta: { values: { "params.0.fileId": ["undefined"] } }
+     };
+
+     // 5. Send the request to the target API (the full response is fetched first, whether or not streaming was requested)
+     console.log("Forwarding request to:", TARGET_URL);
+     const targetResponse = await fetch(TARGET_URL, {
+       method: 'POST', headers: TARGET_HEADERS, body: JSON.stringify(payload),
+     });
+
+     // 6. Handle the target API response
+     if (!targetResponse.ok) {
+       const errorBody = await targetResponse.text();
+       console.error(`Target API Error (${targetResponse.status}):`, errorBody);
+       // Return a JSON error even if a streaming response was requested
+       return new Response(JSON.stringify({ error: `Upstream API request failed with status ${targetResponse.status}`, details: errorBody }), {
+         status: 502, headers: { "Content-Type": "application/json", "Access-Control-Allow-Origin": "*" },
+       });
+     }
+
+     // 7. Parse the target API's JSON response
+     let targetData: any;
+     try {
+       targetData = await targetResponse.json();
+     } catch (e) {
+       console.error("Failed to parse target API response JSON:", e);
+       // Return a JSON error even if a streaming response was requested
+       return new Response(JSON.stringify({ error: "Failed to parse upstream API response" }), {
+         status: 500, headers: { "Content-Type": "application/json", "Access-Control-Allow-Origin": "*" },
+       });
+     }
+
+     // 8. Extract the content from the target response
+     const assistantContent = targetData?.json?.result?.content;
+     if (typeof assistantContent !== 'string') {
+       console.error("Could not extract 'content' from target API response:", JSON.stringify(targetData));
+       // Return a JSON error even if a streaming response was requested
+       return new Response(JSON.stringify({ error: "Invalid response format from upstream API" }), {
+         status: 500, headers: { "Content-Type": "application/json", "Access-Control-Allow-Origin": "*" },
+       });
+     }
+
+     // 9. Choose the response format based on isStream
+     const chatCompletionId = `chatcmpl-${crypto.randomUUID()}`; // unique ID for this interaction
+     const modelName = requestBody.model || PROXY_MODEL_NAME; // determine the model name
+
+     if (isStream) {
+       // --- Return a simulated streaming response ---
+       console.log("Simulating stream response...");
+       const encoder = new TextEncoder();
+       const stream = new ReadableStream({
+         async start(controller) {
+           try {
+             // Emit simulated chunks
+             // Chunk 1: send the role
+             controller.enqueue(encoder.encode(
+               createSSEChunk(chatCompletionId, modelName, null, "assistant", null)
+             ));
+             await new Promise(resolve => setTimeout(resolve, 10)); // short delay to simulate processing
+
+             // Chunk 2: send the full content
+             controller.enqueue(encoder.encode(
+               createSSEChunk(chatCompletionId, modelName, assistantContent, null, null)
+             ));
+             await new Promise(resolve => setTimeout(resolve, 10)); // short delay
+
+             // Chunk 3: send the stop signal
+             controller.enqueue(encoder.encode(
+               createSSEChunk(chatCompletionId, modelName, null, null, "stop")
+             ));
+
+             // Send the [DONE] marker
+             controller.enqueue(encoder.encode("data: [DONE]\n\n"));
+
+             // Close the stream
+             controller.close();
+           } catch (error) {
+             console.error("Error during stream simulation:", error);
+             controller.error(error); // signal that the stream failed
+           }
+         }
+       });
+
+       return new Response(stream, {
+         status: 200,
+         headers: {
+           'Content-Type': 'text/event-stream',
+           'Cache-Control': 'no-cache',
+           'Connection': 'keep-alive', // recommended for SSE
+           'Access-Control-Allow-Origin': '*'
+         },
+       });
+
+     } else {
+       // --- Return a complete JSON response ---
+       console.log("Returning non-stream response.");
+       const finalResponse = {
+         id: chatCompletionId,
+         object: "chat.completion",
+         created: Math.floor(Date.now() / 1000),
+         model: modelName,
+         choices: [
+           {
+             index: 0,
+             message: {
+               role: "assistant",
+               content: assistantContent,
+             },
+             finish_reason: "stop",
+             logprobs: null,
+           }
+         ],
+         usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
+       };
+
+       return new Response(JSON.stringify(finalResponse), {
+         status: 200,
+         headers: {
+           'Content-Type': 'application/json',
+           'Access-Control-Allow-Origin': '*'
+         },
+       });
+     }
+
+   } catch (error) {
+     // --- Global error handling ---
+     console.error("Unhandled error in handler:", error);
+     // Return a JSON error even if streaming was requested
+     return new Response(JSON.stringify({ error: "Internal Server Error" }), {
+       status: 500,
+       headers: { "Content-Type": "application/json", "Access-Control-Allow-Origin": "*" },
+     });
+   }
  }

+ serve(handler);
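
For reference, a minimal sketch of how a client might call the updated proxy. It assumes the server was started with something like deno run --allow-net --allow-env main.ts, is reachable on the std serve default port 8000, and that AUTH_KEY was left at its default value; the URL and key below are assumptions, not part of this commit.

// Hypothetical smoke test for the proxy above; adjust the URL and key for your deployment.
const resp = await fetch("http://localhost:8000/v1/chat/completions", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    "Authorization": "Bearer default_api_key_value", // must match AUTH_KEY
  },
  body: JSON.stringify({
    model: "gpt-4o",
    stream: false,
    messages: [{ role: "user", content: "Hello!" }],
  }),
});
const completion = await resp.json();
console.log(completion.choices[0].message.content);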