victor HF Staff commited on
Commit
dff2be9
·
1 Parent(s): dc037f7

feat: switch to official openai client

Browse files
ai/providers.ts DELETED
@@ -1,117 +0,0 @@
1
- import { createOpenAI } from "@ai-sdk/openai";
2
- import { createGroq } from "@ai-sdk/groq";
3
- import { createAnthropic } from "@ai-sdk/anthropic";
4
- import { createXai } from "@ai-sdk/xai";
5
-
6
- import {
7
- customProvider,
8
- wrapLanguageModel,
9
- extractReasoningMiddleware
10
- } from "ai";
11
-
12
- export interface ModelInfo {
13
- provider: string;
14
- name: string;
15
- description: string;
16
- apiVersion: string;
17
- capabilities: string[];
18
- }
19
-
20
- const middleware = extractReasoningMiddleware({
21
- tagName: 'think',
22
- });
23
-
24
- // Helper to get API keys from environment variables first, then localStorage
25
- const getApiKey = (key: string): string | undefined => {
26
- // Check for environment variables first
27
- if (process.env[key]) {
28
- return process.env[key] || undefined;
29
- }
30
-
31
- // Fall back to localStorage if available
32
- if (typeof window !== 'undefined') {
33
- return window.localStorage.getItem(key) || undefined;
34
- }
35
-
36
- return undefined;
37
- };
38
-
39
- // Create provider instances with API keys from localStorage
40
- const openaiClient = createOpenAI({
41
- apiKey: getApiKey('OPENAI_API_KEY'),
42
- });
43
-
44
- const anthropicClient = createAnthropic({
45
- apiKey: getApiKey('ANTHROPIC_API_KEY'),
46
- });
47
-
48
- const groqClient = createGroq({
49
- apiKey: getApiKey('GROQ_API_KEY'),
50
- });
51
-
52
- const xaiClient = createXai({
53
- apiKey: getApiKey('XAI_API_KEY'),
54
- });
55
-
56
- const languageModels = {
57
- "gpt-4.1-mini": openaiClient("gpt-4.1-mini"),
58
- "claude-3-7-sonnet": anthropicClient('claude-3-7-sonnet-20250219'),
59
- "qwen-qwq": wrapLanguageModel(
60
- {
61
- model: groqClient("qwen-qwq-32b"),
62
- middleware
63
- }
64
- ),
65
- "grok-3-mini": xaiClient("grok-3-mini-latest"),
66
- };
67
-
68
- export const modelDetails: Record<keyof typeof languageModels, ModelInfo> = {
69
- "gpt-4.1-mini": {
70
- provider: "OpenAI",
71
- name: "GPT-4.1 Mini",
72
- description: "Compact version of OpenAI's GPT-4.1 with good balance of capabilities, including vision.",
73
- apiVersion: "gpt-4.1-mini",
74
- capabilities: ["Balance", "Creative", "Vision"]
75
- },
76
- "claude-3-7-sonnet": {
77
- provider: "Anthropic",
78
- name: "Claude 3.7 Sonnet",
79
- description: "Latest version of Anthropic's Claude 3.7 Sonnet with strong reasoning and coding capabilities.",
80
- apiVersion: "claude-3-7-sonnet-20250219",
81
- capabilities: ["Reasoning", "Efficient", "Agentic"]
82
- },
83
- "qwen-qwq": {
84
- provider: "Groq",
85
- name: "Qwen QWQ",
86
- description: "Latest version of Alibaba's Qwen QWQ with strong reasoning and coding capabilities.",
87
- apiVersion: "qwen-qwq",
88
- capabilities: ["Reasoning", "Efficient", "Agentic"]
89
- },
90
- "grok-3-mini": {
91
- provider: "XAI",
92
- name: "Grok 3 Mini",
93
- description: "Latest version of XAI's Grok 3 Mini with strong reasoning and coding capabilities.",
94
- apiVersion: "grok-3-mini-latest",
95
- capabilities: ["Reasoning", "Efficient", "Agentic"]
96
- },
97
- };
98
-
99
- // Update API keys when localStorage changes (for runtime updates)
100
- if (typeof window !== 'undefined') {
101
- window.addEventListener('storage', (event) => {
102
- // Reload the page if any API key changed to refresh the providers
103
- if (event.key?.includes('API_KEY')) {
104
- window.location.reload();
105
- }
106
- });
107
- }
108
-
109
- export const model = customProvider({
110
- languageModels,
111
- });
112
-
113
- export type modelID = keyof typeof languageModels;
114
-
115
- export const MODELS = Object.keys(languageModels);
116
-
117
- export const defaultModel: modelID = "qwen-qwq";
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app/actions.ts CHANGED
@@ -1,9 +1,8 @@
1
  "use server";
2
 
3
- import { openai } from "@ai-sdk/openai";
4
- import { generateObject } from "ai";
5
  import { z } from "zod";
6
- import { startMcpSandbox } from '@/lib/mcp-sandbox';
7
 
8
  // Use a global map to store active sandbox instances across requests
9
  const activeSandboxes = (global as any).activeSandboxes || new Map();
@@ -13,61 +12,61 @@ const activeSandboxes = (global as any).activeSandboxes || new Map();
13
  function getMessageText(message: any): string {
14
  // Check if the message has parts (new format)
15
  if (message.parts && Array.isArray(message.parts)) {
16
- const textParts = message.parts.filter((p: any) => p.type === 'text' && p.text);
 
 
17
  if (textParts.length > 0) {
18
- return textParts.map((p: any) => p.text).join('\n');
19
  }
20
  }
21
-
22
  // Fallback to content (old format)
23
- if (typeof message.content === 'string') {
24
  return message.content;
25
  }
26
-
27
  // If content is an array (potentially of parts), try to extract text
28
  if (Array.isArray(message.content)) {
29
- const textItems = message.content.filter((item: any) =>
30
- typeof item === 'string' || (item.type === 'text' && item.text)
 
31
  );
32
-
33
  if (textItems.length > 0) {
34
- return textItems.map((item: any) =>
35
- typeof item === 'string' ? item : item.text
36
- ).join('\n');
37
  }
38
  }
39
-
40
- return '';
41
  }
42
 
43
- export async function generateTitle(messages: any[]) {
44
- // Convert messages to a format that OpenAI can understand
45
- const normalizedMessages = messages.map(msg => ({
46
- role: msg.role,
47
- content: getMessageText(msg)
48
  }));
49
-
50
- const { object } = await generateObject({
51
- model: openai("gpt-4.1"),
52
- schema: z.object({
53
- title: z.string().min(1).max(100),
54
- }),
55
- system: `
56
- You are a helpful assistant that generates titles for chat conversations.
57
- The title should be a short description of the conversation.
58
- The title should be no more than 30 characters.
59
- The title should be unique and not generic.
60
- `,
61
  messages: [
62
- ...normalizedMessages,
63
  {
64
- role: "user",
65
- content: "Generate a title for the conversation.",
 
66
  },
 
 
67
  ],
68
  });
69
 
70
- return object.title;
 
 
 
71
  }
72
 
73
  export interface KeyValuePair {
@@ -85,75 +84,84 @@ export async function startSandbox(params: {
85
  env?: KeyValuePair[];
86
  }): Promise<{ url: string }> {
87
  const { id, command, args, env } = params;
88
-
89
  console.log(`[startSandbox] Starting sandbox for ID: ${id}`);
90
-
91
  // Validate required fields
92
  if (!id || !command || !args) {
93
- throw new Error('Missing required fields');
94
  }
95
-
96
  // Check if we already have a sandbox for this ID
97
  if (activeSandboxes.has(id)) {
98
  // If we do, get the URL and return it without creating a new sandbox
99
  const existingSandbox = activeSandboxes.get(id);
100
- console.log(`[startSandbox] Reusing existing sandbox for ${id}, URL: ${existingSandbox.url}`);
101
-
 
 
102
  // Re-fetch the URL to make sure it's current
103
  try {
104
  const freshUrl = await existingSandbox.sandbox.getUrl();
105
  console.log(`[startSandbox] Updated sandbox URL for ${id}: ${freshUrl}`);
106
-
107
  // Update the URL in the map
108
- activeSandboxes.set(id, {
109
- sandbox: existingSandbox.sandbox,
110
- url: freshUrl
111
  });
112
-
113
  return { url: freshUrl };
114
  } catch (error) {
115
- console.error(`[startSandbox] Error refreshing sandbox URL for ${id}:`, error);
116
-
 
 
 
117
  // Fall through to create a new sandbox if we couldn't refresh the URL
118
  activeSandboxes.delete(id);
119
- console.log(`[startSandbox] Removed stale sandbox for ${id}, will create a new one`);
 
 
120
  }
121
  }
122
-
123
  // Build the command string
124
  let cmd: string;
125
-
126
  // Prepare the command based on the type of executable
127
- if (command === 'uvx') {
128
  // For uvx, use the direct format
129
  const toolName = args[0];
130
- cmd = `uvx ${toolName} ${args.slice(1).join(' ')}`;
131
- } else if (command.includes('python')) {
132
  // For python commands
133
- cmd = `${command} ${args.join(' ')}`;
134
  } else {
135
  // For node or other commands
136
- cmd = `${command} ${args.join(' ')}`;
137
  }
138
-
139
  // Convert env array to object if needed
140
  const envs: Record<string, string> = {};
141
  if (env && env.length > 0) {
142
  env.forEach((envVar) => {
143
- if (envVar.key) envs[envVar.key] = envVar.value || '';
144
  });
145
  }
146
-
147
  // Start the sandbox
148
- console.log(`[startSandbox] Creating new sandbox for ${id} with command: ${cmd}`);
 
 
149
  const sandbox = await startMcpSandbox({ cmd, envs });
150
  const url = await sandbox.getUrl();
151
-
152
  console.log(`[startSandbox] Sandbox created for ${id}, URL: ${url}`);
153
-
154
  // Store the sandbox in our map
155
  activeSandboxes.set(id, { sandbox, url });
156
-
157
  return { url };
158
  }
159
 
@@ -162,17 +170,17 @@ export async function startSandbox(params: {
162
  */
163
  export async function stopSandbox(id: string): Promise<{ success: boolean }> {
164
  if (!id) {
165
- throw new Error('Missing sandbox ID');
166
  }
167
-
168
  // Check if we have a sandbox with this ID
169
  if (!activeSandboxes.has(id)) {
170
  throw new Error(`No active sandbox found with ID: ${id}`);
171
  }
172
-
173
  // Stop the sandbox
174
  const { sandbox } = activeSandboxes.get(id);
175
-
176
  try {
177
  await sandbox.stop();
178
  console.log(`Stopped sandbox with ID: ${id}`);
@@ -180,9 +188,9 @@ export async function stopSandbox(id: string): Promise<{ success: boolean }> {
180
  console.error(`Error stopping sandbox ${id}:`, stopError);
181
  // Continue to remove from the map even if stop fails
182
  }
183
-
184
  // Remove from our map
185
  activeSandboxes.delete(id);
186
-
187
  return { success: true };
188
  }
 
1
  "use server";
2
 
3
+ import { openai } from "@/lib/openai-client";
 
4
  import { z } from "zod";
5
+ import { startMcpSandbox } from "@/lib/mcp-sandbox";
6
 
7
  // Use a global map to store active sandbox instances across requests
8
  const activeSandboxes = (global as any).activeSandboxes || new Map();
 
12
  function getMessageText(message: any): string {
13
  // Check if the message has parts (new format)
14
  if (message.parts && Array.isArray(message.parts)) {
15
+ const textParts = message.parts.filter(
16
+ (p: any) => p.type === "text" && p.text
17
+ );
18
  if (textParts.length > 0) {
19
+ return textParts.map((p: any) => p.text).join("\n");
20
  }
21
  }
22
+
23
  // Fallback to content (old format)
24
+ if (typeof message.content === "string") {
25
  return message.content;
26
  }
27
+
28
  // If content is an array (potentially of parts), try to extract text
29
  if (Array.isArray(message.content)) {
30
+ const textItems = message.content.filter(
31
+ (item: any) =>
32
+ typeof item === "string" || (item.type === "text" && item.text)
33
  );
34
+
35
  if (textItems.length > 0) {
36
+ return textItems
37
+ .map((item: any) => (typeof item === "string" ? item : item.text))
38
+ .join("\n");
39
  }
40
  }
41
+
42
+ return "";
43
  }
44
 
45
+ export async function generateTitle(messages: any[]): Promise<string> {
46
+ const normalized = messages.map((m) => ({
47
+ role: m.role as "user" | "assistant" | "system",
48
+ content: Array.isArray(m.content) ? m.content.join("\n") : m.content,
 
49
  }));
50
+
51
+ const response = await openai.chat.completions.create({
52
+ model: "gpt-3.5-turbo",
53
+ max_tokens: 30,
54
+ temperature: 0.4,
 
 
 
 
 
 
 
55
  messages: [
 
56
  {
57
+ role: "system",
58
+ content:
59
+ "You are a helpful assistant that writes very short, unique titles (≤30 characters) for chat conversations.",
60
  },
61
+ ...normalized,
62
+ { role: "user", content: "Write the title only." },
63
  ],
64
  });
65
 
66
+ const title = response.choices[0]?.message.content?.trim() ?? "New Chat";
67
+
68
+ // basic schema check
69
+ return z.string().min(1).max(100).parse(title);
70
  }
71
 
72
  export interface KeyValuePair {
 
84
  env?: KeyValuePair[];
85
  }): Promise<{ url: string }> {
86
  const { id, command, args, env } = params;
87
+
88
  console.log(`[startSandbox] Starting sandbox for ID: ${id}`);
89
+
90
  // Validate required fields
91
  if (!id || !command || !args) {
92
+ throw new Error("Missing required fields");
93
  }
94
+
95
  // Check if we already have a sandbox for this ID
96
  if (activeSandboxes.has(id)) {
97
  // If we do, get the URL and return it without creating a new sandbox
98
  const existingSandbox = activeSandboxes.get(id);
99
+ console.log(
100
+ `[startSandbox] Reusing existing sandbox for ${id}, URL: ${existingSandbox.url}`
101
+ );
102
+
103
  // Re-fetch the URL to make sure it's current
104
  try {
105
  const freshUrl = await existingSandbox.sandbox.getUrl();
106
  console.log(`[startSandbox] Updated sandbox URL for ${id}: ${freshUrl}`);
107
+
108
  // Update the URL in the map
109
+ activeSandboxes.set(id, {
110
+ sandbox: existingSandbox.sandbox,
111
+ url: freshUrl,
112
  });
113
+
114
  return { url: freshUrl };
115
  } catch (error) {
116
+ console.error(
117
+ `[startSandbox] Error refreshing sandbox URL for ${id}:`,
118
+ error
119
+ );
120
+
121
  // Fall through to create a new sandbox if we couldn't refresh the URL
122
  activeSandboxes.delete(id);
123
+ console.log(
124
+ `[startSandbox] Removed stale sandbox for ${id}, will create a new one`
125
+ );
126
  }
127
  }
128
+
129
  // Build the command string
130
  let cmd: string;
131
+
132
  // Prepare the command based on the type of executable
133
+ if (command === "uvx") {
134
  // For uvx, use the direct format
135
  const toolName = args[0];
136
+ cmd = `uvx ${toolName} ${args.slice(1).join(" ")}`;
137
+ } else if (command.includes("python")) {
138
  // For python commands
139
+ cmd = `${command} ${args.join(" ")}`;
140
  } else {
141
  // For node or other commands
142
+ cmd = `${command} ${args.join(" ")}`;
143
  }
144
+
145
  // Convert env array to object if needed
146
  const envs: Record<string, string> = {};
147
  if (env && env.length > 0) {
148
  env.forEach((envVar) => {
149
+ if (envVar.key) envs[envVar.key] = envVar.value || "";
150
  });
151
  }
152
+
153
  // Start the sandbox
154
+ console.log(
155
+ `[startSandbox] Creating new sandbox for ${id} with command: ${cmd}`
156
+ );
157
  const sandbox = await startMcpSandbox({ cmd, envs });
158
  const url = await sandbox.getUrl();
159
+
160
  console.log(`[startSandbox] Sandbox created for ${id}, URL: ${url}`);
161
+
162
  // Store the sandbox in our map
163
  activeSandboxes.set(id, { sandbox, url });
164
+
165
  return { url };
166
  }
167
 
 
170
  */
171
  export async function stopSandbox(id: string): Promise<{ success: boolean }> {
172
  if (!id) {
173
+ throw new Error("Missing sandbox ID");
174
  }
175
+
176
  // Check if we have a sandbox with this ID
177
  if (!activeSandboxes.has(id)) {
178
  throw new Error(`No active sandbox found with ID: ${id}`);
179
  }
180
+
181
  // Stop the sandbox
182
  const { sandbox } = activeSandboxes.get(id);
183
+
184
  try {
185
  await sandbox.stop();
186
  console.log(`Stopped sandbox with ID: ${id}`);
 
188
  console.error(`Error stopping sandbox ${id}:`, stopError);
189
  // Continue to remove from the map even if stop fails
190
  }
191
+
192
  // Remove from our map
193
  activeSandboxes.delete(id);
194
+
195
  return { success: true };
196
  }
app/api/chat/route.ts CHANGED
@@ -1,20 +1,35 @@
1
- import { model, type modelID } from "@/ai/providers";
2
- import { smoothStream, streamText, type UIMessage } from "ai";
3
- import { appendResponseMessages } from 'ai';
4
- import { saveChat, saveMessages, convertToDBMessages } from '@/lib/chat-store';
5
- import { nanoid } from 'nanoid';
6
- import { db } from '@/lib/db';
7
- import { chats } from '@/lib/db/schema';
8
- import { eq, and } from 'drizzle-orm';
9
- import { initializeMCPClients, type MCPServerConfig } from '@/lib/mcp-client';
10
- import { generateTitle } from '@/app/actions';
11
-
12
- export const runtime = 'nodejs';
13
-
14
- // Allow streaming responses up to 30 seconds
 
 
 
 
15
  export const maxDuration = 120;
16
 
17
- export const dynamic = 'force-dynamic';
 
 
 
 
 
 
 
 
 
 
 
18
 
19
  export async function POST(req: Request) {
20
  const {
@@ -24,18 +39,18 @@ export async function POST(req: Request) {
24
  userId,
25
  mcpServers = [],
26
  }: {
27
- messages: UIMessage[];
28
  chatId?: string;
29
- selectedModel: modelID;
30
  userId: string;
31
  mcpServers?: MCPServerConfig[];
32
  } = await req.json();
33
 
34
  if (!userId) {
35
- return new Response(
36
- JSON.stringify({ error: "User ID is required" }),
37
- { status: 400, headers: { "Content-Type": "application/json" } }
38
- );
39
  }
40
 
41
  const id = chatId || nanoid();
@@ -46,10 +61,7 @@ export async function POST(req: Request) {
46
  if (chatId) {
47
  try {
48
  const existingChat = await db.query.chats.findFirst({
49
- where: and(
50
- eq(chats.id, chatId),
51
- eq(chats.userId, userId)
52
- )
53
  });
54
  isNewChat = !existingChat;
55
  } catch (error) {
@@ -65,12 +77,15 @@ export async function POST(req: Request) {
65
  if (isNewChat && messages.length > 0) {
66
  try {
67
  // Generate a title based on first user message
68
- const userMessage = messages.find(m => m.role === 'user');
69
- let title = 'New Chat';
70
 
71
- if (userMessage) {
72
  try {
73
- title = await generateTitle([userMessage]);
 
 
 
74
  } catch (error) {
75
  console.error("Error generating title:", error);
76
  }
@@ -81,120 +96,36 @@ export async function POST(req: Request) {
81
  id,
82
  userId,
83
  title,
84
- messages: [],
85
  });
86
  } catch (error) {
87
  console.error("Error saving new chat:", error);
88
  }
89
  }
90
 
91
- // Initialize MCP clients using the already running persistent SSE servers
92
- // mcpServers now only contains SSE configurations since stdio servers
93
- // have been converted to SSE in the MCP context
94
  const { tools, cleanup } = await initializeMCPClients(mcpServers, req.signal);
95
 
96
- console.log("messages", messages);
97
- console.log("parts", messages.map(m => m.parts.map(p => p)));
98
-
99
- // Track if the response has completed
100
- let responseCompleted = false;
101
-
102
- const result = streamText({
103
- model: model.languageModel(selectedModel),
104
- system: `You are a helpful assistant with access to a variety of tools.
105
-
106
- Today's date is ${new Date().toISOString().split('T')[0]}.
107
-
108
- The tools are very powerful, and you can use them to answer the user's question.
109
- So choose the tool that is most relevant to the user's question.
110
-
111
- If tools are not available, say you don't know or if the user wants a tool they can add one from the server icon in bottom left corner in the sidebar.
112
-
113
- You can use multiple tools in a single response.
114
- Always respond after using the tools for better user experience.
115
- You can run multiple steps using all the tools!!!!
116
- Make sure to use the right tool to respond to the user's question.
117
-
118
- Multiple tools can be used in a single response and multiple steps can be used to answer the user's question.
119
-
120
- ## Response Format
121
- - Markdown is supported.
122
- - Respond according to tool's response.
123
- - Use the tools to answer the user's question.
124
- - If you don't know the answer, use the tools to find the answer or say you don't know.
125
- `,
126
- messages,
127
- tools,
128
- maxSteps: 20,
129
- providerOptions: {
130
- google: {
131
- thinkingConfig: {
132
- thinkingBudget: 2048,
133
- },
134
- },
135
- anthropic: {
136
- thinking: {
137
- type: 'enabled',
138
- budgetTokens: 12000
139
- },
140
- }
141
- },
142
- experimental_transform: smoothStream({
143
- delayInMs: 5, // optional: defaults to 10ms
144
- chunking: 'line', // optional: defaults to 'word'
145
- }),
146
- onError: (error) => {
147
- console.error(JSON.stringify(error, null, 2));
148
  },
149
- async onFinish({ response }) {
150
- responseCompleted = true;
151
- const allMessages = appendResponseMessages({
152
- messages,
153
- responseMessages: response.messages,
154
- });
155
 
156
- await saveChat({
157
- id,
158
- userId,
159
- messages: allMessages,
160
- });
161
-
162
- const dbMessages = convertToDBMessages(allMessages, id);
163
- await saveMessages({ messages: dbMessages });
164
-
165
- // Clean up resources - now this just closes the client connections
166
- // not the actual servers which persist in the MCP context
167
- await cleanup();
168
- }
169
- });
170
-
171
- // Ensure cleanup happens if the request is terminated early
172
- req.signal.addEventListener('abort', async () => {
173
- if (!responseCompleted) {
174
- console.log("Request aborted, cleaning up resources");
175
- try {
176
- await cleanup();
177
- } catch (error) {
178
- console.error("Error during cleanup on abort:", error);
179
- }
180
- }
181
  });
182
 
183
- result.consumeStream()
184
- // Add chat ID to response headers so client can know which chat was created
185
- return result.toDataStreamResponse({
186
- sendReasoning: true,
187
  headers: {
188
- 'X-Chat-ID': id
189
- },
190
- getErrorMessage: (error) => {
191
- if (error instanceof Error) {
192
- if (error.message.includes("Rate limit")) {
193
- return "Rate limit exceeded. Please try again later.";
194
- }
195
- }
196
- console.error(error);
197
- return "An error occurred.";
198
  },
199
  });
200
- }
 
1
+ import { openai } from "@/lib/openai-client";
2
+ import type { ModelID } from "@/lib/models";
3
+ import { saveChat } from "@/lib/chat-store";
4
+ import { nanoid } from "nanoid";
5
+ import { db } from "@/lib/db";
6
+ import { chats } from "@/lib/db/schema";
7
+ import { eq, and } from "drizzle-orm";
8
+ import { initializeMCPClients, type MCPServerConfig } from "@/lib/mcp-client";
9
+ import { generateTitle } from "@/app/actions";
10
+ import { createOpenAIStream } from "@/lib/openai-stream";
11
+ import type {
12
+ ChatCompletionTool,
13
+ ChatCompletionMessageParam,
14
+ } from "openai/resources";
15
+
16
+ export const runtime = "nodejs";
17
+
18
+ // Allow streaming responses up to 120 seconds
19
  export const maxDuration = 120;
20
 
21
+ export const dynamic = "force-dynamic";
22
+
23
+ function mcpToolsToOpenAITools(
24
+ tools: Record<string, any>
25
+ ): ChatCompletionTool[] {
26
+ return Object.entries(tools).map(
27
+ ([name, schema]): ChatCompletionTool => ({
28
+ type: "function",
29
+ function: { name, parameters: schema.parameters },
30
+ })
31
+ );
32
+ }
33
 
34
  export async function POST(req: Request) {
35
  const {
 
39
  userId,
40
  mcpServers = [],
41
  }: {
42
+ messages: ChatCompletionMessageParam[];
43
  chatId?: string;
44
+ selectedModel: ModelID;
45
  userId: string;
46
  mcpServers?: MCPServerConfig[];
47
  } = await req.json();
48
 
49
  if (!userId) {
50
+ return new Response(JSON.stringify({ error: "User ID is required" }), {
51
+ status: 400,
52
+ headers: { "Content-Type": "application/json" },
53
+ });
54
  }
55
 
56
  const id = chatId || nanoid();
 
61
  if (chatId) {
62
  try {
63
  const existingChat = await db.query.chats.findFirst({
64
+ where: and(eq(chats.id, chatId), eq(chats.userId, userId)),
 
 
 
65
  });
66
  isNewChat = !existingChat;
67
  } catch (error) {
 
77
  if (isNewChat && messages.length > 0) {
78
  try {
79
  // Generate a title based on first user message
80
+ const userMessage = messages.find((m) => m.role === "user");
81
+ let title = "New Chat";
82
 
83
+ if (userMessage && typeof userMessage.content === "string") {
84
  try {
85
+ // The generateTitle function expects a UIMessage[], let's adapt
86
+ title = await generateTitle([
87
+ { role: "user", content: userMessage.content, id: "temp-id" },
88
+ ]);
89
  } catch (error) {
90
  console.error("Error generating title:", error);
91
  }
 
96
  id,
97
  userId,
98
  title,
99
+ messages: [], // Messages will be saved by the client
100
  });
101
  } catch (error) {
102
  console.error("Error saving new chat:", error);
103
  }
104
  }
105
 
 
 
 
106
  const { tools, cleanup } = await initializeMCPClients(mcpServers, req.signal);
107
 
108
+ const completion = await openai.chat.completions.create(
109
+ {
110
+ model: selectedModel,
111
+ stream: true,
112
+ messages,
113
+ tools: mcpToolsToOpenAITools(tools),
114
+ tool_choice: "auto",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115
  },
116
+ { signal: req.signal }
117
+ );
 
 
 
 
118
 
119
+ const stream = createOpenAIStream(completion, {
120
+ onFinal() {
121
+ cleanup();
122
+ },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
123
  });
124
 
125
+ return new Response(stream, {
 
 
 
126
  headers: {
127
+ "Content-Type": "text/event-stream",
128
+ "X-Chat-ID": id,
 
 
 
 
 
 
 
 
129
  },
130
  });
131
+ }
components/chat.tsx CHANGED
@@ -1,6 +1,6 @@
1
  "use client";
2
 
3
- import { defaultModel, type modelID } from "@/ai/providers";
4
  import { Message, useChat } from "@ai-sdk/react";
5
  import { useState, useEffect, useMemo, useCallback } from "react";
6
  import { Textarea } from "./textarea";
@@ -30,7 +30,7 @@ export default function Chat() {
30
  const chatId = params?.id as string | undefined;
31
  const queryClient = useQueryClient();
32
 
33
- const [selectedModel, setSelectedModel] = useLocalStorage<modelID>("selectedModel", defaultModel);
34
  const [userId, setUserId] = useState<string>('');
35
  const [generatedChatId, setGeneratedChatId] = useState<string>('');
36
 
 
1
  "use client";
2
 
3
+ import { DEFAULT_MODEL, type ModelID } from "@/lib/models";
4
  import { Message, useChat } from "@ai-sdk/react";
5
  import { useState, useEffect, useMemo, useCallback } from "react";
6
  import { Textarea } from "./textarea";
 
30
  const chatId = params?.id as string | undefined;
31
  const queryClient = useQueryClient();
32
 
33
+ const [selectedModel, setSelectedModel] = useLocalStorage<ModelID>("selectedModel", DEFAULT_MODEL);
34
  const [userId, setUserId] = useState<string>('');
35
  const [generatedChatId, setGeneratedChatId] = useState<string>('');
36
 
components/model-picker.tsx CHANGED
@@ -1,5 +1,5 @@
1
  "use client";
2
- import { MODELS, modelDetails, type modelID, defaultModel } from "@/ai/providers";
3
  import {
4
  Select,
5
  SelectContent,
@@ -9,232 +9,76 @@ import {
9
  SelectValue,
10
  } from "./ui/select";
11
  import { cn } from "@/lib/utils";
12
- import { Sparkles, Zap, Info, Bolt, Code, Brain, Lightbulb, Image, Gauge, Rocket, Bot } from "lucide-react";
13
- import { useState, useEffect } from "react";
14
 
15
  interface ModelPickerProps {
16
- selectedModel: modelID;
17
- setSelectedModel: (model: modelID) => void;
18
  }
19
 
20
  export const ModelPicker = ({ selectedModel, setSelectedModel }: ModelPickerProps) => {
21
- const [hoveredModel, setHoveredModel] = useState<modelID | null>(null);
22
-
23
  // Ensure we always have a valid model ID
24
- const validModelId = MODELS.includes(selectedModel) ? selectedModel : defaultModel;
25
 
26
  // If the selected model is invalid, update it to the default
27
  useEffect(() => {
28
  if (selectedModel !== validModelId) {
29
- setSelectedModel(validModelId as modelID);
30
  }
31
  }, [selectedModel, validModelId, setSelectedModel]);
32
-
33
- // Function to get the appropriate icon for each provider
34
- const getProviderIcon = (provider: string) => {
35
- switch (provider.toLowerCase()) {
36
- case 'anthropic':
37
- return <Sparkles className="h-3 w-3 text-orange-600" />;
38
- case 'openai':
39
- return <Zap className="h-3 w-3 text-green-500" />;
40
- case 'google':
41
- return <Zap className="h-3 w-3 text-red-500" />;
42
- case 'groq':
43
- return <Sparkles className="h-3 w-3 text-blue-500" />;
44
- case 'xai':
45
- return <Sparkles className="h-3 w-3 text-yellow-500" />;
46
- default:
47
- return <Info className="h-3 w-3 text-blue-500" />;
48
- }
49
- };
50
-
51
- // Function to get capability icon
52
- const getCapabilityIcon = (capability: string) => {
53
- switch (capability.toLowerCase()) {
54
- case 'code':
55
- return <Code className="h-2.5 w-2.5" />;
56
- case 'reasoning':
57
- return <Brain className="h-2.5 w-2.5" />;
58
- case 'research':
59
- return <Lightbulb className="h-2.5 w-2.5" />;
60
- case 'vision':
61
- return <Image className="h-2.5 w-2.5" />;
62
- case 'fast':
63
- case 'rapid':
64
- return <Bolt className="h-2.5 w-2.5" />;
65
- case 'efficient':
66
- case 'compact':
67
- return <Gauge className="h-2.5 w-2.5" />;
68
- case 'creative':
69
- case 'balance':
70
- return <Rocket className="h-2.5 w-2.5" />;
71
- case 'agentic':
72
- return <Bot className="h-2.5 w-2.5" />;
73
- default:
74
- return <Info className="h-2.5 w-2.5" />;
75
- }
76
- };
77
-
78
- // Get capability badge color
79
- const getCapabilityColor = (capability: string) => {
80
- switch (capability.toLowerCase()) {
81
- case 'code':
82
- return "bg-blue-100 text-blue-800 dark:bg-blue-900/30 dark:text-blue-300";
83
- case 'reasoning':
84
- case 'research':
85
- return "bg-purple-100 text-purple-800 dark:bg-purple-900/30 dark:text-purple-300";
86
- case 'vision':
87
- return "bg-indigo-100 text-indigo-800 dark:bg-indigo-900/30 dark:text-indigo-300";
88
- case 'fast':
89
- case 'rapid':
90
- return "bg-amber-100 text-amber-800 dark:bg-amber-900/30 dark:text-amber-300";
91
- case 'efficient':
92
- case 'compact':
93
- return "bg-emerald-100 text-emerald-800 dark:bg-emerald-900/30 dark:text-emerald-300";
94
- case 'creative':
95
- case 'balance':
96
- return "bg-rose-100 text-rose-800 dark:bg-rose-900/30 dark:text-rose-300";
97
- case 'agentic':
98
- return "bg-cyan-100 text-cyan-800 dark:bg-cyan-900/30 dark:text-cyan-300";
99
- default:
100
- return "bg-gray-100 text-gray-800 dark:bg-gray-800 dark:text-gray-300";
101
- }
102
- };
103
-
104
- // Get current model details to display
105
- const displayModelId = hoveredModel || validModelId;
106
- const currentModelDetails = modelDetails[displayModelId];
107
 
108
  // Handle model change
109
  const handleModelChange = (modelId: string) => {
110
- if (MODELS.includes(modelId)) {
111
- const typedModelId = modelId as modelID;
112
- setSelectedModel(typedModelId);
113
  }
114
  };
115
 
116
  return (
117
  <div className="absolute bottom-2 left-2 z-10">
118
- <Select
119
- value={validModelId}
120
- onValueChange={handleModelChange}
121
  defaultValue={validModelId}
122
  >
123
- <SelectTrigger
124
  className="max-w-[200px] sm:max-w-fit sm:w-56 px-2 sm:px-3 h-8 sm:h-9 rounded-full group border-primary/20 bg-primary/5 hover:bg-primary/10 dark:bg-primary/10 dark:hover:bg-primary/20 transition-all duration-200 ring-offset-background focus:ring-2 focus:ring-primary/30 focus:ring-offset-2"
125
  >
126
- <SelectValue
127
- placeholder="Select model"
128
  className="text-xs font-medium flex items-center gap-1 sm:gap-2 text-primary dark:text-primary-foreground"
129
  >
130
  <div className="flex items-center gap-1 sm:gap-2">
131
- {getProviderIcon(modelDetails[validModelId].provider)}
132
- <span className="font-medium truncate">{modelDetails[validModelId].name}</span>
133
  </div>
134
  </SelectValue>
135
  </SelectTrigger>
136
  <SelectContent
137
  align="start"
138
- className="bg-background/95 dark:bg-muted/95 backdrop-blur-sm border-border/80 rounded-lg overflow-hidden p-0 w-[280px] sm:w-[350px] md:w-[515px]"
139
  >
140
- <div className="grid grid-cols-1 sm:grid-cols-[120px_1fr] md:grid-cols-[200px_1fr] items-start">
141
- {/* Model selector column */}
142
- <div className="sm:border-r border-border/40 bg-muted/20 p-0 pr-1">
143
- <SelectGroup className="space-y-1">
144
- {MODELS.map((id) => {
145
- const modelId = id as modelID;
146
- return (
147
- <SelectItem
148
- key={id}
149
- value={id}
150
- onMouseEnter={() => setHoveredModel(modelId)}
151
- onMouseLeave={() => setHoveredModel(null)}
152
- className={cn(
153
- "!px-2 sm:!px-3 py-1.5 sm:py-2 cursor-pointer rounded-md text-xs transition-colors duration-150",
154
- "hover:bg-primary/5 hover:text-primary-foreground",
155
- "focus:bg-primary/10 focus:text-primary focus:outline-none",
156
- "data-[highlighted]:bg-primary/10 data-[highlighted]:text-primary",
157
- validModelId === id && "!bg-primary/15 !text-primary font-medium"
158
- )}
159
- >
160
- <div className="flex flex-col gap-0.5">
161
- <div className="flex items-center gap-1.5">
162
- {getProviderIcon(modelDetails[modelId].provider)}
163
- <span className="font-medium truncate">{modelDetails[modelId].name}</span>
164
- </div>
165
- <span className="text-[10px] sm:text-xs text-muted-foreground">
166
- {modelDetails[modelId].provider}
167
- </span>
168
- </div>
169
- </SelectItem>
170
- );
171
- })}
172
- </SelectGroup>
173
- </div>
174
-
175
- {/* Model details column - hidden on smallest screens, visible on sm+ */}
176
- <div className="sm:block hidden p-2 sm:p-3 md:p-4 flex-col">
177
- <div>
178
- <div className="flex items-center gap-2 mb-1">
179
- {getProviderIcon(currentModelDetails.provider)}
180
- <h3 className="text-sm font-semibold">{currentModelDetails.name}</h3>
181
- </div>
182
- <div className="text-xs text-muted-foreground mb-1">
183
- Provider: <span className="font-medium">{currentModelDetails.provider}</span>
184
- </div>
185
-
186
- {/* Capability badges */}
187
- <div className="flex flex-wrap gap-1 mt-2 mb-3">
188
- {currentModelDetails.capabilities.map((capability) => (
189
- <span
190
- key={capability}
191
- className={cn(
192
- "inline-flex items-center gap-1 text-[9px] px-1.5 py-0.5 rounded-full font-medium",
193
- getCapabilityColor(capability)
194
- )}
195
- >
196
- {getCapabilityIcon(capability)}
197
- <span>{capability}</span>
198
- </span>
199
- ))}
200
- </div>
201
-
202
- <div className="text-xs text-foreground/90 leading-relaxed mb-3 hidden md:block">
203
- {currentModelDetails.description}
204
- </div>
205
- </div>
206
-
207
- <div className="bg-muted/40 rounded-md p-2 hidden md:block">
208
- <div className="text-[10px] text-muted-foreground flex justify-between items-center">
209
- <span>API Version:</span>
210
- <code className="bg-background/80 px-2 py-0.5 rounded text-[10px] font-mono">
211
- {currentModelDetails.apiVersion}
212
- </code>
213
- </div>
214
- </div>
215
- </div>
216
-
217
- {/* Condensed model details for mobile only */}
218
- <div className="p-3 sm:hidden border-t border-border/30">
219
- <div className="flex flex-wrap gap-1 mb-2">
220
- {currentModelDetails.capabilities.slice(0, 4).map((capability) => (
221
- <span
222
- key={capability}
223
- className={cn(
224
- "inline-flex items-center gap-1 text-[9px] px-1.5 py-0.5 rounded-full font-medium",
225
- getCapabilityColor(capability)
226
- )}
227
- >
228
- {getCapabilityIcon(capability)}
229
- <span>{capability}</span>
230
- </span>
231
- ))}
232
- {currentModelDetails.capabilities.length > 4 && (
233
- <span className="text-[10px] text-muted-foreground">+{currentModelDetails.capabilities.length - 4} more</span>
234
  )}
235
- </div>
236
- </div>
237
- </div>
 
 
 
 
 
238
  </SelectContent>
239
  </Select>
240
  </div>
 
1
  "use client";
2
+ import { MODELS, DEFAULT_MODEL, ModelID } from "@/lib/models";
3
  import {
4
  Select,
5
  SelectContent,
 
9
  SelectValue,
10
  } from "./ui/select";
11
  import { cn } from "@/lib/utils";
12
+ import { Bot } from "lucide-react";
13
+ import { useEffect } from "react";
14
 
15
  interface ModelPickerProps {
16
+ selectedModel: ModelID;
17
+ setSelectedModel: (model: ModelID) => void;
18
  }
19
 
20
  export const ModelPicker = ({ selectedModel, setSelectedModel }: ModelPickerProps) => {
 
 
21
  // Ensure we always have a valid model ID
22
+ const validModelId = MODELS.includes(selectedModel) ? selectedModel : DEFAULT_MODEL;
23
 
24
  // If the selected model is invalid, update it to the default
25
  useEffect(() => {
26
  if (selectedModel !== validModelId) {
27
+ setSelectedModel(validModelId);
28
  }
29
  }, [selectedModel, validModelId, setSelectedModel]);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
 
31
  // Handle model change
32
  const handleModelChange = (modelId: string) => {
33
+ if (MODELS.includes(modelId as ModelID)) {
34
+ setSelectedModel(modelId as ModelID);
 
35
  }
36
  };
37
 
38
  return (
39
  <div className="absolute bottom-2 left-2 z-10">
40
+ <Select
41
+ value={validModelId}
42
+ onValueChange={handleModelChange}
43
  defaultValue={validModelId}
44
  >
45
+ <SelectTrigger
46
  className="max-w-[200px] sm:max-w-fit sm:w-56 px-2 sm:px-3 h-8 sm:h-9 rounded-full group border-primary/20 bg-primary/5 hover:bg-primary/10 dark:bg-primary/10 dark:hover:bg-primary/20 transition-all duration-200 ring-offset-background focus:ring-2 focus:ring-primary/30 focus:ring-offset-2"
47
  >
48
+ <SelectValue
49
+ placeholder="Select model"
50
  className="text-xs font-medium flex items-center gap-1 sm:gap-2 text-primary dark:text-primary-foreground"
51
  >
52
  <div className="flex items-center gap-1 sm:gap-2">
53
+ <Bot className="h-3 w-3" />
54
+ <span className="font-medium truncate">{validModelId}</span>
55
  </div>
56
  </SelectValue>
57
  </SelectTrigger>
58
  <SelectContent
59
  align="start"
60
+ className="bg-background/95 dark:bg-muted/95 backdrop-blur-sm border-border/80 rounded-lg overflow-hidden p-0 w-[280px]"
61
  >
62
+ <SelectGroup className="space-y-1 p-1">
63
+ {MODELS.map((id) => (
64
+ <SelectItem
65
+ key={id}
66
+ value={id}
67
+ className={cn(
68
+ "!px-2 sm:!px-3 py-1.5 sm:py-2 cursor-pointer rounded-md text-xs transition-colors duration-150",
69
+ "hover:bg-primary/5 hover:text-primary-foreground",
70
+ "focus:bg-primary/10 focus:text-primary focus:outline-none",
71
+ "data-[highlighted]:bg-primary/10 data-[highlighted]:text-primary",
72
+ validModelId === id && "!bg-primary/15 !text-primary font-medium"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
  )}
74
+ >
75
+ <div className="flex items-center gap-1.5">
76
+ <Bot className="h-4 w-4" />
77
+ <span className="font-medium truncate">{id}</span>
78
+ </div>
79
+ </SelectItem>
80
+ ))}
81
+ </SelectGroup>
82
  </SelectContent>
83
  </Select>
84
  </div>
lib/mcp-client.ts CHANGED
@@ -1,5 +1,6 @@
1
- import { experimental_createMCPClient as createMCPClient } from 'ai';
2
- import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js';
 
3
 
4
  export interface KeyValuePair {
5
  key: string;
@@ -8,7 +9,7 @@ export interface KeyValuePair {
8
 
9
  export interface MCPServerConfig {
10
  url: string;
11
- type: 'sse' | 'stdio';
12
  command?: string;
13
  args?: string[];
14
  env?: KeyValuePair[];
@@ -37,7 +38,7 @@ export async function initializeMCPClients(
37
  for (const mcpServer of mcpServers) {
38
  try {
39
  const headers = mcpServer.headers?.reduce((acc, header) => {
40
- if (header.key) acc[header.key] = header.value || '';
41
  return acc;
42
  }, {} as Record<string, string>);
43
 
@@ -45,22 +46,26 @@ export async function initializeMCPClients(
45
  // SSE is only when URL ends with /sse
46
  // which is the heuristic used by other clients
47
 
48
- const transport = mcpServer.url.endsWith('/sse')
49
- ? {
50
- type: 'sse' as const,
51
- url: mcpServer.url,
52
- headers,
53
- }
54
  : new StreamableHTTPClientTransport(new URL(mcpServer.url), {
55
  requestInit: {
56
  headers,
57
  },
58
  });
59
 
60
- const mcpClient = await createMCPClient({ transport });
 
 
 
 
61
  mcpClients.push(mcpClient);
62
 
63
- const mcptools = await mcpClient.tools();
64
 
65
  console.log(`MCP tools from ${mcpServer.url}:`, Object.keys(mcptools));
66
 
@@ -74,7 +79,7 @@ export async function initializeMCPClients(
74
 
75
  // Register cleanup for all clients if an abort signal is provided
76
  if (abortSignal && mcpClients.length > 0) {
77
- abortSignal.addEventListener('abort', async () => {
78
  await cleanupMCPClients(mcpClients);
79
  });
80
  }
@@ -82,7 +87,7 @@ export async function initializeMCPClients(
82
  return {
83
  tools,
84
  clients: mcpClients,
85
- cleanup: async () => await cleanupMCPClients(mcpClients)
86
  };
87
  }
88
 
@@ -95,4 +100,4 @@ async function cleanupMCPClients(clients: any[]): Promise<void> {
95
  console.error("Error closing MCP client:", error);
96
  }
97
  }
98
- }
 
1
+ import { Client as MCPClient } from "@modelcontextprotocol/sdk/client/index.js";
2
+ import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js";
3
+ import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";
4
 
5
  export interface KeyValuePair {
6
  key: string;
 
9
 
10
  export interface MCPServerConfig {
11
  url: string;
12
+ type: "sse" | "stdio";
13
  command?: string;
14
  args?: string[];
15
  env?: KeyValuePair[];
 
38
  for (const mcpServer of mcpServers) {
39
  try {
40
  const headers = mcpServer.headers?.reduce((acc, header) => {
41
+ if (header.key) acc[header.key] = header.value || "";
42
  return acc;
43
  }, {} as Record<string, string>);
44
 
 
46
  // SSE is only when URL ends with /sse
47
  // which is the heuristic used by other clients
48
 
49
+ const transport = mcpServer.url.endsWith("/sse")
50
+ ? new SSEClientTransport(new URL(mcpServer.url), {
51
+ requestInit: {
52
+ headers,
53
+ },
54
+ })
55
  : new StreamableHTTPClientTransport(new URL(mcpServer.url), {
56
  requestInit: {
57
  headers,
58
  },
59
  });
60
 
61
+ const mcpClient = new MCPClient({
62
+ name: "mcp-chat-client",
63
+ version: "0.1.0",
64
+ });
65
+ await mcpClient.connect(transport);
66
  mcpClients.push(mcpClient);
67
 
68
+ const mcptools = await mcpClient.listTools();
69
 
70
  console.log(`MCP tools from ${mcpServer.url}:`, Object.keys(mcptools));
71
 
 
79
 
80
  // Register cleanup for all clients if an abort signal is provided
81
  if (abortSignal && mcpClients.length > 0) {
82
+ abortSignal.addEventListener("abort", async () => {
83
  await cleanupMCPClients(mcpClients);
84
  });
85
  }
 
87
  return {
88
  tools,
89
  clients: mcpClients,
90
+ cleanup: async () => await cleanupMCPClients(mcpClients),
91
  };
92
  }
93
 
 
100
  console.error("Error closing MCP client:", error);
101
  }
102
  }
103
+ }
lib/models.ts ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
/**
 * List here only the model IDs your endpoint exposes.
 * Add/remove freely – nothing else in the codebase cares.
 */
export const MODELS = ["gpt-4o-mini", "gpt-4-turbo", "gpt-3.5-turbo"] as const;

// Union of the literal IDs above ("gpt-4o-mini" | "gpt-4-turbo" | "gpt-3.5-turbo").
export type ModelID = (typeof MODELS)[number];

// Fallback model: used as the initial selection and whenever a stored ID is
// no longer present in MODELS (see the model picker's validity check).
export const DEFAULT_MODEL: ModelID = "gpt-4o-mini";
lib/openai-client.ts ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ import OpenAI from "openai";
2
+
3
+ export const openai = new OpenAI({
4
+ apiKey: process.env.OPENAI_API_KEY || "",
5
+ baseURL: process.env.OPENAI_BASE_URL || undefined,
6
+ defaultHeaders: process.env.OPENAI_EXTRA_HEADERS
7
+ ? JSON.parse(process.env.OPENAI_EXTRA_HEADERS)
8
+ : undefined,
9
+ });
lib/openai-stream.ts ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import {
2
+ createParser,
3
+ type ParsedEvent,
4
+ type ReconnectInterval,
5
+ } from "eventsource-parser";
6
+ import type { ChatCompletionChunk } from "openai/resources";
7
+
8
+ export function createOpenAIStream(
9
+ completion: AsyncIterable<ChatCompletionChunk>,
10
+ callbacks: { onFinal: () => void }
11
+ ): ReadableStream {
12
+ const { readable, writable } = new TransformStream();
13
+ const writer = writable.getWriter();
14
+ const encoder = new TextEncoder();
15
+
16
+ let isFinished = false;
17
+
18
+ async function forwardCompletion() {
19
+ try {
20
+ for await (const chunk of completion) {
21
+ const { content } = chunk.choices[0].delta;
22
+ if (content) {
23
+ await writer.write(encoder.encode(`0:${JSON.stringify(content)}\n`));
24
+ }
25
+ }
26
+ } catch (error) {
27
+ console.error("Error forwarding completion:", error);
28
+ await writer.abort(error);
29
+ } finally {
30
+ isFinished = true;
31
+ await writer.close();
32
+ callbacks.onFinal();
33
+ }
34
+ }
35
+
36
+ forwardCompletion();
37
+
38
+ return readable;
39
+ }
package.json CHANGED
@@ -13,12 +13,7 @@
13
  "db:studio": "drizzle-kit studio"
14
  },
15
  "dependencies": {
16
- "@ai-sdk/anthropic": "^1.2.11",
17
- "@ai-sdk/google": "^1.2.17",
18
- "@ai-sdk/groq": "^1.2.9",
19
- "@ai-sdk/openai": "^1.3.22",
20
  "@ai-sdk/react": "^1.2.12",
21
- "@ai-sdk/xai": "^1.2.16",
22
  "@aws-sdk/client-s3": "^3.821.0",
23
  "@daytonaio/sdk": "^0.19.0",
24
  "@modelcontextprotocol/sdk": "^1.12.1",
@@ -36,10 +31,10 @@
36
  "@radix-ui/react-tooltip": "^1.2.3",
37
  "@tanstack/react-query": "^5.74.4",
38
  "@vercel/analytics": "^1.5.0",
39
- "ai": "^4.3.16",
40
  "class-variance-authority": "^0.7.1",
41
  "clsx": "^2.1.1",
42
  "drizzle-orm": "^0.44.1",
 
43
  "fast-deep-equal": "^3.1.3",
44
  "framer-motion": "^12.16.0",
45
  "lucide-react": "^0.488.0",
@@ -47,6 +42,7 @@
47
  "nanoid": "^5.1.5",
48
  "next": "^15.3.3",
49
  "next-themes": "^0.4.6",
 
50
  "or": "^0.2.0",
51
  "pg": "^8.16.0",
52
  "react": "^19.1.0",
@@ -81,5 +77,6 @@
81
  "onlyBuiltDependencies": [
82
  "@tailwindcss/oxide"
83
  ]
84
- }
 
85
  }
 
13
  "db:studio": "drizzle-kit studio"
14
  },
15
  "dependencies": {
 
 
 
 
16
  "@ai-sdk/react": "^1.2.12",
 
17
  "@aws-sdk/client-s3": "^3.821.0",
18
  "@daytonaio/sdk": "^0.19.0",
19
  "@modelcontextprotocol/sdk": "^1.12.1",
 
31
  "@radix-ui/react-tooltip": "^1.2.3",
32
  "@tanstack/react-query": "^5.74.4",
33
  "@vercel/analytics": "^1.5.0",
 
34
  "class-variance-authority": "^0.7.1",
35
  "clsx": "^2.1.1",
36
  "drizzle-orm": "^0.44.1",
37
+ "eventsource-parser": "^3.0.3",
38
  "fast-deep-equal": "^3.1.3",
39
  "framer-motion": "^12.16.0",
40
  "lucide-react": "^0.488.0",
 
42
  "nanoid": "^5.1.5",
43
  "next": "^15.3.3",
44
  "next-themes": "^0.4.6",
45
+ "openai": "^4.26.0",
46
  "or": "^0.2.0",
47
  "pg": "^8.16.0",
48
  "react": "^19.1.0",
 
77
  "onlyBuiltDependencies": [
78
  "@tailwindcss/oxide"
79
  ]
80
+ },
81
+ "packageManager": "[email protected]+sha512.d615db246fe70f25dcfea6d8d73dee782ce23e2245e3c4f6f888249fb568149318637dca73c2c5c8ef2a4ca0d5657fb9567188bfab47f566d1ee6ce987815c39"
82
  }
pnpm-lock.yaml CHANGED
The diff for this file is too large to render. See raw diff