Oliver Jägle committed
Commit 2327de3 · unverified · 1 Parent(s): 424ad1e

Lint-fix all files in app

app/components/chat/BaseChat.tsx CHANGED
@@ -1,5 +1,7 @@
-// @ts-nocheck
-// Preventing TS checks with files presented in the video for a better presentation.
+/*
+ * @ts-nocheck
+ * Preventing TS checks with files presented in the video for a better presentation.
+ */
 import type { Message } from 'ai';
 import React, { type RefCallback, useEffect } from 'react';
 import { ClientOnly } from 'remix-utils/client-only';
@@ -34,6 +36,7 @@ const ModelSelector = ({ model, setModel, provider, setProvider, modelList, prov
   value={provider?.name}
   onChange={(e) => {
     setProvider(providerList.find((p) => p.name === e.target.value));
+
     const firstModel = [...modelList].find((m) => m.provider == e.target.value);
     setModel(firstModel ? firstModel.name : '');
   }}
@@ -118,14 +121,17 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
       // Load API keys from cookies on component mount
       try {
         const storedApiKeys = Cookies.get('apiKeys');
+
         if (storedApiKeys) {
           const parsedKeys = JSON.parse(storedApiKeys);
+
           if (typeof parsedKeys === 'object' && parsedKeys !== null) {
             setApiKeys(parsedKeys);
           }
         }
       } catch (error) {
         console.error('Error loading API keys from cookies:', error);
+
         // Clear invalid cookie data
         Cookies.remove('apiKeys');
       }
@@ -139,6 +145,7 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
      try {
        const updatedApiKeys = { ...apiKeys, [provider]: key };
        setApiKeys(updatedApiKeys);
+
        // Save updated API keys to cookies with 30 day expiry and secure settings
        Cookies.set('apiKeys', JSON.stringify(updatedApiKeys), {
          expires: 30, // 30 days
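
Note: the hunks above lean on a cookie round-trip for API keys: write with Cookies.set, read back defensively, and clear the cookie when parsing fails. A minimal standalone sketch of that pattern, assuming only the js-cookie package this file already uses (the helper names are illustrative, not from the codebase):

import Cookies from 'js-cookie';

// Persist a provider -> key map for 30 days, mirroring the diff above.
function saveApiKeys(keys: Record<string, string>) {
  Cookies.set('apiKeys', JSON.stringify(keys), { expires: 30 });
}

// Read it back; malformed cookie data is removed rather than propagated.
function loadApiKeys(): Record<string, string> {
  try {
    const raw = Cookies.get('apiKeys');
    const parsed = raw ? JSON.parse(raw) : null;

    return typeof parsed === 'object' && parsed !== null ? parsed : {};
  } catch {
    Cookies.remove('apiKeys'); // clear invalid cookie data, as the component does

    return {};
  }
}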
app/components/chat/Chat.client.tsx CHANGED
@@ -1,5 +1,7 @@
-// @ts-nocheck
-// Preventing TS checks with files presented in the video for a better presentation.
+/*
+ * @ts-nocheck
+ * Preventing TS checks with files presented in the video for a better presentation.
+ */
 import { useStore } from '@nanostores/react';
 import type { Message } from 'ai';
 import { useChat } from 'ai/react';
@@ -81,7 +83,7 @@ export const ChatImpl = memo(({ initialMessages, storeMessageHistory }: ChatProp
   });
   const [provider, setProvider] = useState(() => {
     const savedProvider = Cookies.get('selectedProvider');
-    return PROVIDER_LIST.find(p => p.name === savedProvider) || DEFAULT_PROVIDER;
+    return PROVIDER_LIST.find((p) => p.name === savedProvider) || DEFAULT_PROVIDER;
   });

   const { showChat } = useStore(chatStore);
@@ -93,11 +95,13 @@ export const ChatImpl = memo(({ initialMessages, storeMessageHistory }: ChatProp
   const { messages, isLoading, input, handleInputChange, setInput, stop, append } = useChat({
     api: '/api/chat',
     body: {
-      apiKeys
+      apiKeys,
     },
     onError: (error) => {
       logger.error('Request failed\n\n', error);
-      toast.error('There was an error processing your request: ' + (error.message ? error.message : "No details were returned"));
+      toast.error(
+        'There was an error processing your request: ' + (error.message ? error.message : 'No details were returned'),
+      );
     },
     onFinish: () => {
       logger.debug('Finished streaming');
@@ -218,6 +222,7 @@ export const ChatImpl = memo(({ initialMessages, storeMessageHistory }: ChatProp

   useEffect(() => {
     const storedApiKeys = Cookies.get('apiKeys');
+
     if (storedApiKeys) {
       setApiKeys(JSON.parse(storedApiKeys));
     }
@@ -271,7 +276,7 @@ export const ChatImpl = memo(({ initialMessages, storeMessageHistory }: ChatProp
             },
             model,
             provider,
-            apiKeys
+            apiKeys,
           );
         }}
       />
app/components/chat/UserMessage.tsx CHANGED
@@ -1,5 +1,7 @@
-// @ts-nocheck
-// Preventing TS checks with files presented in the video for a better presentation.
+/*
+ * @ts-nocheck
+ * Preventing TS checks with files presented in the video for a better presentation.
+ */
 import { modificationsRegex } from '~/utils/diff';
 import { MODEL_REGEX, PROVIDER_REGEX } from '~/utils/constants';
 import { Markdown } from './Markdown';
@@ -17,5 +19,9 @@ export function UserMessage({ content }: UserMessageProps) {
 }

 function sanitizeUserMessage(content: string) {
-  return content.replace(modificationsRegex, '').replace(MODEL_REGEX, 'Using: $1').replace(PROVIDER_REGEX, ' ($1)\n\n').trim();
+  return content
+    .replace(modificationsRegex, '')
+    .replace(MODEL_REGEX, 'Using: $1')
+    .replace(PROVIDER_REGEX, ' ($1)\n\n')
+    .trim();
 }
app/components/workbench/EditorPanel.tsx CHANGED
@@ -255,6 +255,7 @@ export const EditorPanel = memo(
             </div>
             {Array.from({ length: terminalCount + 1 }, (_, index) => {
               const isActive = activeTerminal === index;
+
               if (index == 0) {
                 logger.info('Starting bolt terminal');

@@ -273,6 +274,7 @@ export const EditorPanel = memo(
                   />
                 );
               }
+
               return (
                 <Terminal
                   key={index}
app/components/workbench/FileTree.tsx CHANGED
@@ -111,7 +111,7 @@ export const FileTree = memo(
   };

   return (
-    <div className={classNames('text-sm', className ,'overflow-y-auto')}>
+    <div className={classNames('text-sm', className, 'overflow-y-auto')}>
       {filteredFileList.map((fileOrFolder) => {
         switch (fileOrFolder.kind) {
           case 'file': {
app/components/workbench/Workbench.client.tsx CHANGED
@@ -174,16 +174,21 @@ export const Workbench = memo(({ chatStarted, isStreaming }: WorkspaceProps) =>
      'Please enter a name for your new GitHub repository:',
      'bolt-generated-project',
    );
+
    if (!repoName) {
      alert('Repository name is required. Push to GitHub cancelled.');
      return;
    }
+
    const githubUsername = prompt('Please enter your GitHub username:');
+
    if (!githubUsername) {
      alert('GitHub username is required. Push to GitHub cancelled.');
      return;
    }
+
    const githubToken = prompt('Please enter your GitHub personal access token:');
+
    if (!githubToken) {
      alert('GitHub token is required. Push to GitHub cancelled.');
      return;
app/lib/.server/llm/api-key.ts CHANGED
@@ -1,5 +1,7 @@
-// @ts-nocheck
-// Preventing TS checks with files presented in the video for a better presentation.
+/*
+ * @ts-nocheck
+ * Preventing TS checks with files presented in the video for a better presentation.
+ */
 import { env } from 'node:process';

 export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Record<string, string>) {
@@ -28,17 +30,19 @@ export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Re
     case 'OpenRouter':
       return env.OPEN_ROUTER_API_KEY || cloudflareEnv.OPEN_ROUTER_API_KEY;
     case 'Deepseek':
-      return env.DEEPSEEK_API_KEY || cloudflareEnv.DEEPSEEK_API_KEY
+      return env.DEEPSEEK_API_KEY || cloudflareEnv.DEEPSEEK_API_KEY;
     case 'Mistral':
-      return env.MISTRAL_API_KEY || cloudflareEnv.MISTRAL_API_KEY;
-    case "OpenAILike":
+      return env.MISTRAL_API_KEY || cloudflareEnv.MISTRAL_API_KEY;
+    case 'OpenAILike':
       return env.OPENAI_LIKE_API_KEY || cloudflareEnv.OPENAI_LIKE_API_KEY;
-    case "xAI":
+    case 'xAI':
       return env.XAI_API_KEY || cloudflareEnv.XAI_API_KEY;
-    case "Cohere":
+    case 'Cohere':
       return env.COHERE_API_KEY;
+    case 'AzureOpenAI':
+      return env.AZURE_OPENAI_API_KEY;
     default:
-      return "";
+      return '';
   }
 }

@@ -47,14 +51,16 @@ export function getBaseURL(cloudflareEnv: Env, provider: string) {
     case 'OpenAILike':
       return env.OPENAI_LIKE_API_BASE_URL || cloudflareEnv.OPENAI_LIKE_API_BASE_URL;
     case 'LMStudio':
-      return env.LMSTUDIO_API_BASE_URL || cloudflareEnv.LMSTUDIO_API_BASE_URL || "http://localhost:1234";
+      return env.LMSTUDIO_API_BASE_URL || cloudflareEnv.LMSTUDIO_API_BASE_URL || 'http://localhost:1234';
     case 'Ollama':
-      let baseUrl = env.OLLAMA_API_BASE_URL || cloudflareEnv.OLLAMA_API_BASE_URL || "http://localhost:11434";
-      if (env.RUNNING_IN_DOCKER === 'true') {
-        baseUrl = baseUrl.replace("localhost", "host.docker.internal");
-      }
-      return baseUrl;
+      let baseUrl = env.OLLAMA_API_BASE_URL || cloudflareEnv.OLLAMA_API_BASE_URL || 'http://localhost:11434';
+
+      if (env.RUNNING_IN_DOCKER === 'true') {
+        baseUrl = baseUrl.replace('localhost', 'host.docker.internal');
+      }
+
+      return baseUrl;
     default:
-      return "";
+      return '';
   }
 }
app/lib/.server/llm/model.ts CHANGED
@@ -1,13 +1,15 @@
-// @ts-nocheck
-// Preventing TS checks with files presented in the video for a better presentation.
+/*
+ * @ts-nocheck
+ * Preventing TS checks with files presented in the video for a better presentation.
+ */
 import { getAPIKey, getBaseURL } from '~/lib/.server/llm/api-key';
 import { createAnthropic } from '@ai-sdk/anthropic';
 import { createOpenAI } from '@ai-sdk/openai';
 import { createGoogleGenerativeAI } from '@ai-sdk/google';
 import { ollama } from 'ollama-ai-provider';
-import { createOpenRouter } from "@openrouter/ai-sdk-provider";
+import { createOpenRouter } from '@openrouter/ai-sdk-provider';
 import { createMistral } from '@ai-sdk/mistral';
-import { createCohere } from '@ai-sdk/cohere'
+import { createCohere } from '@ai-sdk/cohere';

 export function getAnthropicModel(apiKey: string, model: string) {
   const anthropic = createAnthropic({
@@ -16,7 +18,7 @@ export function getAnthropicModel(apiKey: string, model: string) {

   return anthropic(model);
 }
-export function getOpenAILikeModel(baseURL:string,apiKey: string, model: string) {
+export function getOpenAILikeModel(baseURL: string, apiKey: string, model: string) {
   const openai = createOpenAI({
     baseURL,
     apiKey,
@@ -25,7 +27,7 @@ export function getOpenAILikeModel(baseURL:string,apiKey: string, model: string)
   return openai(model);
 }

-export function getCohereAIModel(apiKey:string, model: string){
+export function getCohereAIModel(apiKey: string, model: string) {
   const cohere = createCohere({
     apiKey,
   });
@@ -43,7 +45,7 @@ export function getOpenAIModel(apiKey: string, model: string) {

 export function getMistralModel(apiKey: string, model: string) {
   const mistral = createMistral({
-    apiKey
+    apiKey,
   });

   return mistral(model);
@@ -76,15 +78,16 @@ export function getHuggingFaceModel(apiKey: string, model: string) {
 }

 export function getOllamaModel(baseURL: string, model: string) {
-  let Ollama = ollama(model, {
+  const Ollama = ollama(model, {
     numCtx: 32768,
   });

   Ollama.config.baseURL = `${baseURL}/api`;
+
   return Ollama;
 }

-export function getDeepseekModel(apiKey: string, model: string){
+export function getDeepseekModel(apiKey: string, model: string) {
   const openai = createOpenAI({
     baseURL: 'https://api.deepseek.com/beta',
     apiKey,
@@ -95,7 +98,7 @@ export function getDeepseekModel(apiKey: string, model: string){

 export function getOpenRouterModel(apiKey: string, model: string) {
   const openRouter = createOpenRouter({
-    apiKey
+    apiKey,
   });

   return openRouter.chat(model);
@@ -104,7 +107,7 @@ export function getOpenRouterModel(apiKey: string, model: string) {
 export function getLMStudioModel(baseURL: string, model: string) {
   const lmstudio = createOpenAI({
     baseUrl: `${baseURL}/v1`,
-    apiKey: "",
+    apiKey: '',
   });

   return lmstudio(model);
@@ -119,7 +122,6 @@ export function getXAIModel(apiKey: string, model: string) {
   return openai(model);
 }

-
 export function getModel(provider: string, model: string, env: Env, apiKeys?: Record<string, string>) {
   const apiKey = getAPIKey(env, provider, apiKeys);
   const baseURL = getBaseURL(env, provider);
@@ -138,11 +140,11 @@ export function getModel(provider: string, model: string, env: Env, apiKeys?: Re
     case 'Google':
       return getGoogleModel(apiKey, model);
     case 'OpenAILike':
-      return getOpenAILikeModel(baseURL,apiKey, model);
+      return getOpenAILikeModel(baseURL, apiKey, model);
     case 'Deepseek':
       return getDeepseekModel(apiKey, model);
     case 'Mistral':
-      return getMistralModel(apiKey, model);
+      return getMistralModel(apiKey, model);
     case 'LMStudio':
       return getLMStudioModel(baseURL, model);
     case 'xAI':
app/lib/.server/llm/stream-text.ts CHANGED
@@ -1,5 +1,7 @@
-// @ts-nocheck
-// Preventing TS checks with files presented in the video for a better presentation.
+/*
+ * @ts-nocheck
+ * Preventing TS checks with files presented in the video for a better presentation.
+ */
 import { streamText as _streamText, convertToCoreMessages } from 'ai';
 import { getModel } from '~/lib/.server/llm/model';
 import { MAX_TOKENS } from './constants';
@@ -34,19 +36,12 @@ function extractPropertiesFromMessage(message: Message): { model: string; provid
   const provider = providerMatch ? providerMatch[1] : DEFAULT_PROVIDER;

   // Remove model and provider lines from content
-  const cleanedContent = message.content
-    .replace(MODEL_REGEX, '')
-    .replace(PROVIDER_REGEX, '')
-    .trim();
+  const cleanedContent = message.content.replace(MODEL_REGEX, '').replace(PROVIDER_REGEX, '').trim();

   return { model, provider, content: cleanedContent };
 }
-export function streamText(
-  messages: Messages,
-  env: Env,
-  options?: StreamingOptions,
-  apiKeys?: Record<string, string>
-) {
+
+export function streamText(messages: Messages, env: Env, options?: StreamingOptions, apiKeys?: Record<string, string>) {
   let currentModel = DEFAULT_MODEL;
   let currentProvider = DEFAULT_PROVIDER;

@@ -63,17 +58,12 @@ export function streamText(
       return { ...message, content };
     }

-  return message;
+    return message;
   });

   const modelDetails = MODEL_LIST.find((m) => m.name === currentModel);

-
-
-  const dynamicMaxTokens =
-    modelDetails && modelDetails.maxTokenAllowed
-      ? modelDetails.maxTokenAllowed
-      : MAX_TOKENS;
+  const dynamicMaxTokens = modelDetails && modelDetails.maxTokenAllowed ? modelDetails.maxTokenAllowed : MAX_TOKENS;

   return _streamText({
     model: getModel(currentProvider, currentModel, env, apiKeys),
app/lib/persistence/db.ts CHANGED
@@ -161,11 +161,17 @@ async function getUrlIds(db: IDBDatabase): Promise<string[]> {

 export async function forkChat(db: IDBDatabase, chatId: string, messageId: string): Promise<string> {
   const chat = await getMessages(db, chatId);
-  if (!chat) throw new Error('Chat not found');
+
+  if (!chat) {
+    throw new Error('Chat not found');
+  }

   // Find the index of the message to fork at
-  const messageIndex = chat.messages.findIndex(msg => msg.id === messageId);
-  if (messageIndex === -1) throw new Error('Message not found');
+  const messageIndex = chat.messages.findIndex((msg) => msg.id === messageId);
+
+  if (messageIndex === -1) {
+    throw new Error('Message not found');
+  }

   // Get messages up to and including the selected message
   const messages = chat.messages.slice(0, messageIndex + 1);
@@ -175,19 +181,14 @@ export async function forkChat(db: IDBDatabase, chatId: string, messageId: strin
   const urlId = await getUrlId(db, newId);

   // Create the forked chat
-  await setMessages(
-    db,
-    newId,
-    messages,
-    urlId,
-    chat.description ? `${chat.description} (fork)` : 'Forked chat'
-  );
+  await setMessages(db, newId, messages, urlId, chat.description ? `${chat.description} (fork)` : 'Forked chat');

   return urlId;
 }

 export async function duplicateChat(db: IDBDatabase, id: string): Promise<string> {
   const chat = await getMessages(db, id);
+
   if (!chat) {
     throw new Error('Chat not found');
   }
@@ -200,7 +201,7 @@ export async function duplicateChat(db: IDBDatabase, id: string): Promise<string
     newId,
     chat.messages,
     newUrlId, // Use the new urlId
-    `${chat.description || 'Chat'} (copy)`
+    `${chat.description || 'Chat'} (copy)`,
   );

   return newUrlId; // Return the urlId instead of id for navigation
app/lib/persistence/useChatHistory.ts CHANGED
@@ -99,7 +99,7 @@ export function useChatHistory() {

       await setMessages(db, chatId.get() as string, messages, urlId, description.get());
     },
-    duplicateCurrentChat: async (listItemId:string) => {
+    duplicateCurrentChat: async (listItemId: string) => {
       if (!db || (!mixedId && !listItemId)) {
         return;
       }
@@ -111,7 +111,7 @@ export function useChatHistory() {
      } catch (error) {
        toast.error('Failed to duplicate chat');
      }
-    }
+    },
   };
 }

app/lib/runtime/action-runner.ts CHANGED
@@ -45,7 +45,6 @@ export class ActionRunner {
   constructor(webcontainerPromise: Promise<WebContainer>, getShellTerminal: () => BoltShell) {
     this.#webcontainer = webcontainerPromise;
     this.#shellTerminal = getShellTerminal;
-
   }

   addAction(data: ActionCallbackData) {
@@ -88,19 +87,20 @@ export class ActionRunner {
     if (action.executed) {
       return;
     }
+
     if (isStreaming && action.type !== 'file') {
       return;
     }

     this.#updateAction(actionId, { ...action, ...data.action, executed: !isStreaming });

-    return this.#currentExecutionPromise = this.#currentExecutionPromise
+    return (this.#currentExecutionPromise = this.#currentExecutionPromise
       .then(() => {
         return this.#executeAction(actionId, isStreaming);
       })
       .catch((error) => {
         console.error('Action failed:', error);
-      });
+      }));
   }

   async #executeAction(actionId: string, isStreaming: boolean = false) {
@@ -121,17 +121,24 @@ export class ActionRunner {
        case 'start': {
          // making the start app non blocking

-          this.#runStartAction(action).then(()=>this.#updateAction(actionId, { status: 'complete' }))
-            .catch(()=>this.#updateAction(actionId, { status: 'failed', error: 'Action failed' }))
-          // adding a delay to avoid any race condition between 2 start actions
-          // i am up for a better approch
-          await new Promise(resolve=>setTimeout(resolve,2000))
-          return
+          this.#runStartAction(action)
+            .then(() => this.#updateAction(actionId, { status: 'complete' }))
+            .catch(() => this.#updateAction(actionId, { status: 'failed', error: 'Action failed' }));
+
+          /*
+           * adding a delay to avoid any race condition between 2 start actions
+           * i am up for a better approch
+           */
+          await new Promise((resolve) => setTimeout(resolve, 2000));
+
+          return;
          break;
        }
      }

-      this.#updateAction(actionId, { status: isStreaming ? 'running' : action.abortSignal.aborted ? 'aborted' : 'complete' });
+      this.#updateAction(actionId, {
+        status: isStreaming ? 'running' : action.abortSignal.aborted ? 'aborted' : 'complete',
+      });
    } catch (error) {
      this.#updateAction(actionId, { status: 'failed', error: 'Action failed' });
      logger.error(`[${action.type}]:Action failed\n\n`, error);
@@ -145,16 +152,19 @@ export class ActionRunner {
     if (action.type !== 'shell') {
       unreachable('Expected shell action');
     }
-    const shell = this.#shellTerminal()
-    await shell.ready()
+
+    const shell = this.#shellTerminal();
+    await shell.ready();
+
     if (!shell || !shell.terminal || !shell.process) {
       unreachable('Shell terminal not found');
     }
-    const resp = await shell.executeCommand(this.runnerId.get(), action.content)
-    logger.debug(`${action.type} Shell Response: [exit code:${resp?.exitCode}]`)
-    if (resp?.exitCode != 0) {
-      throw new Error("Failed To Execute Shell Command");
+
+    const resp = await shell.executeCommand(this.runnerId.get(), action.content);
+    logger.debug(`${action.type} Shell Response: [exit code:${resp?.exitCode}]`);
+
+    if (resp?.exitCode != 0) {
+      throw new Error('Failed To Execute Shell Command');
     }
   }

@@ -162,21 +172,26 @@ export class ActionRunner {
     if (action.type !== 'start') {
       unreachable('Expected shell action');
     }
+
     if (!this.#shellTerminal) {
       unreachable('Shell terminal not found');
     }
-    const shell = this.#shellTerminal()
-    await shell.ready()
+
+    const shell = this.#shellTerminal();
+    await shell.ready();
+
     if (!shell || !shell.terminal || !shell.process) {
       unreachable('Shell terminal not found');
     }
-    const resp = await shell.executeCommand(this.runnerId.get(), action.content)
-    logger.debug(`${action.type} Shell Response: [exit code:${resp?.exitCode}]`)
+
+    const resp = await shell.executeCommand(this.runnerId.get(), action.content);
+    logger.debug(`${action.type} Shell Response: [exit code:${resp?.exitCode}]`);

     if (resp?.exitCode != 0) {
-      throw new Error("Failed To Start Application");
+      throw new Error('Failed To Start Application');
     }
-    return resp
+
+    return resp;
   }

   async #runFileAction(action: ActionState) {
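
Note: the parenthesized reassignment of #currentExecutionPromise above is a promise-chain queue: each new action is appended with .then(), so actions run strictly in arrival order, and the .catch() keeps one failure from stalling everything queued behind it. The same pattern backs addToExecutionQueue in app/lib/stores/workbench.ts below. A self-contained sketch of the idea (the names here are illustrative, not from the codebase):

// Serialize async tasks by re-assigning a single promise chain.
class SerialQueue {
  #tail: Promise<void> = Promise.resolve();

  enqueue(task: () => Promise<void>): Promise<void> {
    // Each task starts only after the previous tail settles; errors are
    // logged so a failed task does not block later ones.
    return (this.#tail = this.#tail.then(task).catch((error) => {
      console.error('Task failed:', error);
    }));
  }
}

// Usage: logs 'a' then 'b', even though both are enqueued synchronously.
const queue = new SerialQueue();
queue.enqueue(async () => console.log('a'));
queue.enqueue(async () => console.log('b'));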
app/lib/runtime/message-parser.ts CHANGED
@@ -55,7 +55,7 @@ interface MessageState {
 export class StreamingMessageParser {
   #messages = new Map<string, MessageState>();

-  constructor(private _options: StreamingMessageParserOptions = {}) { }
+  constructor(private _options: StreamingMessageParserOptions = {}) {}

   parse(messageId: string, input: string) {
     let state = this.#messages.get(messageId);
@@ -120,20 +120,20 @@ export class StreamingMessageParser {
           i = closeIndex + ARTIFACT_ACTION_TAG_CLOSE.length;
         } else {
           if ('type' in currentAction && currentAction.type === 'file') {
-            let content = input.slice(i);
+            const content = input.slice(i);

             this._options.callbacks?.onActionStream?.({
               artifactId: currentArtifact.id,
               messageId,
               actionId: String(state.actionId - 1),
               action: {
-                ...currentAction as FileAction,
+                ...(currentAction as FileAction),
                 content,
                 filePath: currentAction.filePath,
               },
-
             });
           }
+
           break;
         }
       } else {
@@ -272,7 +272,7 @@ export class StreamingMessageParser {
       }

       (actionAttributes as FileAction).filePath = filePath;
-    } else if (!(['shell', 'start'].includes(actionType))) {
+    } else if (!['shell', 'start'].includes(actionType)) {
       logger.warn(`Unknown action type '${actionType}'`);
     }

app/lib/stores/terminal.ts CHANGED
@@ -7,7 +7,7 @@ import { coloredText } from '~/utils/terminal';
 export class TerminalStore {
   #webcontainer: Promise<WebContainer>;
   #terminals: Array<{ terminal: ITerminal; process: WebContainerProcess }> = [];
-  #boltTerminal = newBoltShellProcess()
+  #boltTerminal = newBoltShellProcess();

   showTerminal: WritableAtom<boolean> = import.meta.hot?.data.showTerminal ?? atom(true);

@@ -27,8 +27,8 @@ export class TerminalStore {
   }
   async attachBoltTerminal(terminal: ITerminal) {
     try {
-      let wc = await this.#webcontainer
-      await this.#boltTerminal.init(wc, terminal)
+      const wc = await this.#webcontainer;
+      await this.#boltTerminal.init(wc, terminal);
     } catch (error: any) {
       terminal.write(coloredText.red('Failed to spawn bolt shell\n\n') + error.message);
       return;
app/lib/stores/workbench.ts CHANGED
@@ -11,7 +11,7 @@ import { PreviewsStore } from './previews';
 import { TerminalStore } from './terminal';
 import JSZip from 'jszip';
 import { saveAs } from 'file-saver';
-import { Octokit, type RestEndpointMethodTypes } from "@octokit/rest";
+import { Octokit, type RestEndpointMethodTypes } from '@octokit/rest';
 import * as nodePath from 'node:path';
 import type { WebContainerProcess } from '@webcontainer/api';
 import { extractRelativePath } from '~/utils/diff';
@@ -43,7 +43,7 @@ export class WorkbenchStore {
   modifiedFiles = new Set<string>();
   artifactIdList: string[] = [];
   #boltTerminal: { terminal: ITerminal; process: WebContainerProcess } | undefined;
-  #globalExecutionQueue=Promise.resolve();
+  #globalExecutionQueue = Promise.resolve();
   constructor() {
     if (import.meta.hot) {
       import.meta.hot.data.artifacts = this.artifacts;
@@ -54,7 +54,7 @@ export class WorkbenchStore {
   }

   addToExecutionQueue(callback: () => Promise<void>) {
-    this.#globalExecutionQueue=this.#globalExecutionQueue.then(()=>callback())
+    this.#globalExecutionQueue = this.#globalExecutionQueue.then(() => callback());
   }

   get previews() {
@@ -96,7 +96,6 @@ export class WorkbenchStore {
     this.#terminalStore.attachTerminal(terminal);
   }
   attachBoltTerminal(terminal: ITerminal) {
-
     this.#terminalStore.attachBoltTerminal(terminal);
   }

@@ -261,7 +260,8 @@ export class WorkbenchStore {
     this.artifacts.setKey(messageId, { ...artifact, ...state });
   }
   addAction(data: ActionCallbackData) {
-    this._addAction(data)
+    this._addAction(data);
+
     // this.addToExecutionQueue(()=>this._addAction(data))
   }
   async _addAction(data: ActionCallbackData) {
@@ -277,11 +277,10 @@ export class WorkbenchStore {
   }

   runAction(data: ActionCallbackData, isStreaming: boolean = false) {
-    if(isStreaming) {
-      this._runAction(data, isStreaming)
-    }
-    else{
-      this.addToExecutionQueue(()=>this._runAction(data, isStreaming))
+    if (isStreaming) {
+      this._runAction(data, isStreaming);
+    } else {
+      this.addToExecutionQueue(() => this._runAction(data, isStreaming));
     }
   }
   async _runAction(data: ActionCallbackData, isStreaming: boolean = false) {
@@ -292,16 +291,21 @@ export class WorkbenchStore {
     if (!artifact) {
       unreachable('Artifact not found');
     }
+
     if (data.action.type === 'file') {
-      let wc = await webcontainer
+      const wc = await webcontainer;
       const fullPath = nodePath.join(wc.workdir, data.action.filePath);
+
       if (this.selectedFile.value !== fullPath) {
         this.setSelectedFile(fullPath);
       }
+
       if (this.currentView.value !== 'code') {
         this.currentView.set('code');
       }
+
       const doc = this.#editorStore.documents.get()[fullPath];
+
       if (!doc) {
         await artifact.runner.runAction(data, isStreaming);
       }
@@ -382,7 +386,6 @@ export class WorkbenchStore {
   }

   async pushToGitHub(repoName: string, githubUsername: string, ghToken: string) {
-
     try {
       // Get the GitHub auth token from environment variables
       const githubToken = ghToken;
@@ -397,10 +400,11 @@ export class WorkbenchStore {
      const octokit = new Octokit({ auth: githubToken });

      // Check if the repository already exists before creating it
-      let repo: RestEndpointMethodTypes["repos"]["get"]["response"]['data']
+      let repo: RestEndpointMethodTypes['repos']['get']['response']['data'];
+
      try {
-        let resp = await octokit.repos.get({ owner: owner, repo: repoName });
-        repo = resp.data
+        const resp = await octokit.repos.get({ owner, repo: repoName });
+        repo = resp.data;
      } catch (error) {
        if (error instanceof Error && 'status' in error && error.status === 404) {
          // Repository doesn't exist, so create a new one
@@ -418,6 +422,7 @@ export class WorkbenchStore {

      // Get all files
      const files = this.files.get();
+
      if (!files || Object.keys(files).length === 0) {
        throw new Error('No files found to push');
      }
@@ -434,7 +439,7 @@ export class WorkbenchStore {
          });
          return { path: extractRelativePath(filePath), sha: blob.sha };
        }
-      })
+      }),
     );

     const validBlobs = blobs.filter(Boolean); // Filter out any undefined blobs
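
Note: the pushToGitHub hunk probes for an existing repository and treats a 404 as "create it". A sketch of that get-or-create step in isolation, assuming the @octokit/rest client imported above; the createForAuthenticatedUser options are an assumption, since the diff truncates before the create call:

import { Octokit } from '@octokit/rest';

// Hypothetical standalone version of the get-or-create step.
async function getOrCreateRepo(octokit: Octokit, owner: string, repoName: string) {
  try {
    // Succeeds if the repository already exists.
    const resp = await octokit.repos.get({ owner, repo: repoName });
    return resp.data;
  } catch (error) {
    if (error instanceof Error && 'status' in error && (error as { status?: number }).status === 404) {
      // 404: the repository doesn't exist yet, so create it for the token's user.
      const created = await octokit.repos.createForAuthenticatedUser({ name: repoName });
      return created.data;
    }

    throw error; // anything else is a real failure
  }
}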
app/routes/api.chat.ts CHANGED
@@ -1,5 +1,7 @@
-// @ts-nocheck
-// Preventing TS checks with files presented in the video for a better presentation.
+/*
+ * @ts-nocheck
+ * Preventing TS checks with files presented in the video for a better presentation.
+ */
 import { type ActionFunctionArgs } from '@remix-run/cloudflare';
 import { MAX_RESPONSE_SEGMENTS, MAX_TOKENS } from '~/lib/.server/llm/constants';
 import { CONTINUE_PROMPT } from '~/lib/.server/llm/prompts';
@@ -14,14 +16,15 @@ function parseCookies(cookieHeader) {
   const cookies = {};

   // Split the cookie string by semicolons and spaces
-  const items = cookieHeader.split(";").map(cookie => cookie.trim());
+  const items = cookieHeader.split(';').map((cookie) => cookie.trim());
+
+  items.forEach((item) => {
+    const [name, ...rest] = item.split('=');

-  items.forEach(item => {
-    const [name, ...rest] = item.split("=");
     if (name && rest) {
       // Decode the name and value, and join value parts in case it contains '='
       const decodedName = decodeURIComponent(name.trim());
-      const decodedValue = decodeURIComponent(rest.join("=").trim());
+      const decodedValue = decodeURIComponent(rest.join('=').trim());
       cookies[decodedName] = decodedValue;
     }
   });
@@ -31,13 +34,13 @@ function parseCookies(cookieHeader) {

 async function chatAction({ context, request }: ActionFunctionArgs) {
   const { messages } = await request.json<{
-    messages: Messages
+    messages: Messages;
   }>();

-  const cookieHeader = request.headers.get("Cookie");
+  const cookieHeader = request.headers.get('Cookie');

   // Parse the cookie's value (returns an object or null if no cookie exists)
-  const apiKeys = JSON.parse(parseCookies(cookieHeader).apiKeys || "{}");
+  const apiKeys = JSON.parse(parseCookies(cookieHeader).apiKeys || '{}');

   const stream = new SwitchableStream();

@@ -83,7 +86,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
       if (error.message?.includes('API key')) {
         throw new Response('Invalid or missing API key', {
           status: 401,
-          statusText: 'Unauthorized'
+          statusText: 'Unauthorized',
         });
       }

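Note: parseCookies above splits each pair on '=' but re-joins the remainder, so '=' characters inside a value survive. A quick illustrative check of that behaviour (the header value here is made up):

// Made-up Cookie header; the token value contains a literal '='.
const header = 'token=abc=123; theme=dark';

for (const item of header.split(';').map((cookie) => cookie.trim())) {
  const [name, ...rest] = item.split('=');

  if (name && rest) {
    // rest.join('=') restores any '=' that appeared inside the value.
    console.log(decodeURIComponent(name.trim()), '=>', decodeURIComponent(rest.join('=').trim()));
  }
}
// -> token => abc=123
// -> theme => dark
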
app/types/model.ts CHANGED
@@ -1,10 +1,10 @@
 import type { ModelInfo } from '~/utils/types';

 export type ProviderInfo = {
-  staticModels: ModelInfo[],
-  name: string,
-  getDynamicModels?: () => Promise<ModelInfo[]>,
-  getApiKeyLink?: string,
-  labelForGetApiKey?: string,
-  icon?:string,
+  staticModels: ModelInfo[];
+  name: string;
+  getDynamicModels?: () => Promise<ModelInfo[]>;
+  getApiKeyLink?: string;
+  labelForGetApiKey?: string;
+  icon?: string;
 };
app/utils/constants.ts CHANGED
@@ -12,26 +12,42 @@ const PROVIDER_LIST: ProviderInfo[] = [
12
  {
13
  name: 'Anthropic',
14
  staticModels: [
15
- { name: 'claude-3-5-sonnet-latest', label: 'Claude 3.5 Sonnet (new)', provider: 'Anthropic', maxTokenAllowed: 8000 },
16
- { name: 'claude-3-5-sonnet-20240620', label: 'Claude 3.5 Sonnet (old)', provider: 'Anthropic', maxTokenAllowed: 8000 },
17
- { name: 'claude-3-5-haiku-latest', label: 'Claude 3.5 Haiku (new)', provider: 'Anthropic', maxTokenAllowed: 8000 },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  { name: 'claude-3-opus-latest', label: 'Claude 3 Opus', provider: 'Anthropic', maxTokenAllowed: 8000 },
19
  { name: 'claude-3-sonnet-20240229', label: 'Claude 3 Sonnet', provider: 'Anthropic', maxTokenAllowed: 8000 },
20
- { name: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku', provider: 'Anthropic', maxTokenAllowed: 8000 }
21
  ],
22
- getApiKeyLink: "https://console.anthropic.com/settings/keys",
23
  },
24
  {
25
  name: 'Ollama',
26
  staticModels: [],
27
  getDynamicModels: getOllamaModels,
28
- getApiKeyLink: "https://ollama.com/download",
29
- labelForGetApiKey: "Download Ollama",
30
- icon: "i-ph:cloud-arrow-down",
31
- }, {
 
32
  name: 'OpenAILike',
33
  staticModels: [],
34
- getDynamicModels: getOpenAILikeModels
35
  },
36
  {
37
  name: 'Cohere',
@@ -47,7 +63,7 @@ const PROVIDER_LIST: ProviderInfo[] = [
47
  { name: 'c4ai-aya-expanse-8b', label: 'c4AI Aya Expanse 8b', provider: 'Cohere', maxTokenAllowed: 4096 },
48
  { name: 'c4ai-aya-expanse-32b', label: 'c4AI Aya Expanse 32b', provider: 'Cohere', maxTokenAllowed: 4096 },
49
  ],
50
- getApiKeyLink: 'https://dashboard.cohere.com/api-keys'
51
  },
52
  {
53
  name: 'OpenRouter',
@@ -56,22 +72,52 @@ const PROVIDER_LIST: ProviderInfo[] = [
56
  {
57
  name: 'anthropic/claude-3.5-sonnet',
58
  label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',
59
- provider: 'OpenRouter'
60
- , maxTokenAllowed: 8000
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
  },
62
- { name: 'anthropic/claude-3-haiku', label: 'Anthropic: Claude 3 Haiku (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
63
- { name: 'deepseek/deepseek-coder', label: 'Deepseek-Coder V2 236B (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
64
- { name: 'google/gemini-flash-1.5', label: 'Google Gemini Flash 1.5 (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
65
- { name: 'google/gemini-pro-1.5', label: 'Google Gemini Pro 1.5 (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
66
  { name: 'x-ai/grok-beta', label: 'xAI Grok Beta (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
67
- { name: 'mistralai/mistral-nemo', label: 'OpenRouter Mistral Nemo (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
68
- { name: 'qwen/qwen-110b-chat', label: 'OpenRouter Qwen 110b Chat (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
69
- { name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 4096 }
 
 
 
 
 
 
 
 
 
 
70
  ],
71
  getDynamicModels: getOpenRouterModels,
72
  getApiKeyLink: 'https://openrouter.ai/settings/keys',
73
-
74
- }, {
75
  name: 'Google',
76
  staticModels: [
77
  { name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google', maxTokenAllowed: 8192 },
@@ -79,29 +125,50 @@ const PROVIDER_LIST: ProviderInfo[] = [
79
  { name: 'gemini-1.5-flash-8b', label: 'Gemini 1.5 Flash-8b', provider: 'Google', maxTokenAllowed: 8192 },
80
  { name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google', maxTokenAllowed: 8192 },
81
  { name: 'gemini-1.5-pro-002', label: 'Gemini 1.5 Pro-002', provider: 'Google', maxTokenAllowed: 8192 },
82
- { name: 'gemini-exp-1114', label: 'Gemini exp-1114', provider: 'Google', maxTokenAllowed: 8192 }
83
  ],
84
- getApiKeyLink: 'https://aistudio.google.com/app/apikey'
85
- }, {
 
86
  name: 'Groq',
87
  staticModels: [
88
  { name: 'llama-3.1-70b-versatile', label: 'Llama 3.1 70b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
89
  { name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
90
  { name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
91
  { name: 'llama-3.2-3b-preview', label: 'Llama 3.2 3b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
92
- { name: 'llama-3.2-1b-preview', label: 'Llama 3.2 1b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 }
93
  ],
94
- getApiKeyLink: 'https://console.groq.com/keys'
95
  },
96
  {
97
  name: 'HuggingFace',
98
  staticModels: [
99
- { name: 'Qwen/Qwen2.5-Coder-32B-Instruct', label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
100
- { name: '01-ai/Yi-1.5-34B-Chat', label: 'Yi-1.5-34B-Chat (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
101
- { name: 'codellama/CodeLlama-34b-Instruct-hf', label: 'CodeLlama-34b-Instruct (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
102
- { name: 'NousResearch/Hermes-3-Llama-3.1-8B', label: 'Hermes-3-Llama-3.1-8B (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
103
  ],
104
- getApiKeyLink: 'https://huggingface.co/settings/tokens'
105
  },
106
 
107
  {
@@ -110,23 +177,24 @@ const PROVIDER_LIST: ProviderInfo[] = [
110
  { name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
111
  { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
112
  { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },
113
- { name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 }
114
  ],
115
- getApiKeyLink: "https://platform.openai.com/api-keys",
116
- }, {
 
117
  name: 'xAI',
118
- staticModels: [
119
- { name: 'grok-beta', label: 'xAI Grok Beta', provider: 'xAI', maxTokenAllowed: 8000 }
120
- ],
121
- getApiKeyLink: 'https://docs.x.ai/docs/quickstart#creating-an-api-key'
122
- }, {
123
  name: 'Deepseek',
124
  staticModels: [
125
  { name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek', maxTokenAllowed: 8000 },
126
- { name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek', maxTokenAllowed: 8000 }
127
  ],
128
- getApiKeyLink: 'https://platform.deepseek.com/api_keys'
129
- }, {
 
130
  name: 'Mistral',
131
  staticModels: [
132
  { name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral', maxTokenAllowed: 8000 },
@@ -137,27 +205,29 @@ const PROVIDER_LIST: ProviderInfo[] = [
137
  { name: 'ministral-8b-latest', label: 'Mistral 8B', provider: 'Mistral', maxTokenAllowed: 8000 },
138
  { name: 'mistral-small-latest', label: 'Mistral Small', provider: 'Mistral', maxTokenAllowed: 8000 },
139
  { name: 'codestral-latest', label: 'Codestral', provider: 'Mistral', maxTokenAllowed: 8000 },
140
- { name: 'mistral-large-latest', label: 'Mistral Large Latest', provider: 'Mistral', maxTokenAllowed: 8000 }
141
  ],
142
- getApiKeyLink: 'https://console.mistral.ai/api-keys/'
143
- }, {
 
144
  name: 'LMStudio',
145
  staticModels: [],
146
  getDynamicModels: getLMStudioModels,
147
  getApiKeyLink: 'https://lmstudio.ai/',
148
  labelForGetApiKey: 'Get LMStudio',
149
- icon: "i-ph:cloud-arrow-down",
150
- }
151
  ];
152
 
153
  export const DEFAULT_PROVIDER = PROVIDER_LIST[0];
154
 
155
- const staticModels: ModelInfo[] = PROVIDER_LIST.map(p => p.staticModels).flat();
156
 
157
  export let MODEL_LIST: ModelInfo[] = [...staticModels];
158
 
159
  const getOllamaBaseUrl = () => {
160
  const defaultBaseUrl = import.meta.env.OLLAMA_API_BASE_URL || 'http://localhost:11434';
 
161
  // Check if we're in the browser
162
  if (typeof window !== 'undefined') {
163
  // Frontend always uses localhost
@@ -167,22 +237,20 @@ const getOllamaBaseUrl = () => {
167
  // Backend: Check if we're running in Docker
168
  const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
169
 
170
- return isDocker
171
- ? defaultBaseUrl.replace('localhost', 'host.docker.internal')
172
- : defaultBaseUrl;
173
  };
174
 
175
  async function getOllamaModels(): Promise<ModelInfo[]> {
176
  try {
177
  const base_url = getOllamaBaseUrl();
178
  const response = await fetch(`${base_url}/api/tags`);
179
- const data = await response.json() as OllamaApiResponse;
180
 
181
  return data.models.map((model: OllamaModel) => ({
182
  name: model.name,
183
  label: `${model.name} (${model.details.parameter_size})`,
184
  provider: 'Ollama',
185
- maxTokenAllowed:8000,
186
  }));
187
  } catch (e) {
188
  return [];
@@ -192,20 +260,23 @@ async function getOllamaModels(): Promise<ModelInfo[]> {
192
  async function getOpenAILikeModels(): Promise<ModelInfo[]> {
193
  try {
194
  const base_url = import.meta.env.OPENAI_LIKE_API_BASE_URL || '';
 
195
  if (!base_url) {
196
  return [];
197
  }
 
198
  const api_key = import.meta.env.OPENAI_LIKE_API_KEY ?? '';
199
  const response = await fetch(`${base_url}/models`, {
200
  headers: {
201
- Authorization: `Bearer ${api_key}`
202
- }
203
  });
204
- const res = await response.json() as any;
 
205
  return res.data.map((model: any) => ({
206
  name: model.id,
207
  label: model.id,
208
- provider: 'OpenAILike'
209
  }));
210
  } catch (e) {
211
  return [];
@@ -220,51 +291,66 @@ type OpenRouterModelsResponse = {
220
  pricing: {
221
  prompt: number;
222
  completion: number;
223
- }
224
- }[]
225
  };
226
 
227
  async function getOpenRouterModels(): Promise<ModelInfo[]> {
228
- const data: OpenRouterModelsResponse = await (await fetch('https://openrouter.ai/api/v1/models', {
229
- headers: {
230
- 'Content-Type': 'application/json'
231
- }
232
- })).json();
 
 
233
 
234
- return data.data.sort((a, b) => a.name.localeCompare(b.name)).map(m => ({
235
- name: m.id,
236
- label: `${m.name} - in:$${(m.pricing.prompt * 1_000_000).toFixed(
237
- 2)} out:$${(m.pricing.completion * 1_000_000).toFixed(2)} - context ${Math.floor(
238
- m.context_length / 1000)}k`,
239
- provider: 'OpenRouter',
240
- maxTokenAllowed:8000,
241
- }));
 
 
242
  }
243
 
244
  async function getLMStudioModels(): Promise<ModelInfo[]> {
245
  try {
246
  const base_url = import.meta.env.LMSTUDIO_API_BASE_URL || 'http://localhost:1234';
247
  const response = await fetch(`${base_url}/v1/models`);
248
- const data = await response.json() as any;
 
249
  return data.data.map((model: any) => ({
250
  name: model.id,
251
  label: model.id,
252
- provider: 'LMStudio'
253
  }));
254
  } catch (e) {
255
  return [];
256
  }
257
  }
258
 
259
-
260
-
261
  async function initializeModelList(): Promise<ModelInfo[]> {
262
- MODEL_LIST = [...(await Promise.all(
263
- PROVIDER_LIST
264
- .filter((p): p is ProviderInfo & { getDynamicModels: () => Promise<ModelInfo[]> } => !!p.getDynamicModels)
265
- .map(p => p.getDynamicModels())))
266
- .flat(), ...staticModels];
 
267
  return MODEL_LIST;
268
  }
269
 
270
- export { getOllamaModels, getOpenAILikeModels, getLMStudioModels, initializeModelList, getOpenRouterModels, PROVIDER_LIST };
 
12
  {
13
  name: 'Anthropic',
14
  staticModels: [
15
+ {
16
+ name: 'claude-3-5-sonnet-latest',
17
+ label: 'Claude 3.5 Sonnet (new)',
18
+ provider: 'Anthropic',
19
+ maxTokenAllowed: 8000,
20
+ },
21
+ {
22
+ name: 'claude-3-5-sonnet-20240620',
23
+ label: 'Claude 3.5 Sonnet (old)',
24
+ provider: 'Anthropic',
25
+ maxTokenAllowed: 8000,
26
+ },
27
+ {
28
+ name: 'claude-3-5-haiku-latest',
29
+ label: 'Claude 3.5 Haiku (new)',
30
+ provider: 'Anthropic',
31
+ maxTokenAllowed: 8000,
32
+ },
33
  { name: 'claude-3-opus-latest', label: 'Claude 3 Opus', provider: 'Anthropic', maxTokenAllowed: 8000 },
34
  { name: 'claude-3-sonnet-20240229', label: 'Claude 3 Sonnet', provider: 'Anthropic', maxTokenAllowed: 8000 },
35
+ { name: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku', provider: 'Anthropic', maxTokenAllowed: 8000 },
36
  ],
37
+ getApiKeyLink: 'https://console.anthropic.com/settings/keys',
38
  },
39
  {
40
  name: 'Ollama',
41
  staticModels: [],
42
  getDynamicModels: getOllamaModels,
43
+ getApiKeyLink: 'https://ollama.com/download',
44
+ labelForGetApiKey: 'Download Ollama',
45
+ icon: 'i-ph:cloud-arrow-down',
46
+ },
47
+ {
48
  name: 'OpenAILike',
49
  staticModels: [],
50
+ getDynamicModels: getOpenAILikeModels,
51
  },
52
  {
53
  name: 'Cohere',
 
63
  { name: 'c4ai-aya-expanse-8b', label: 'c4AI Aya Expanse 8b', provider: 'Cohere', maxTokenAllowed: 4096 },
64
  { name: 'c4ai-aya-expanse-32b', label: 'c4AI Aya Expanse 32b', provider: 'Cohere', maxTokenAllowed: 4096 },
65
  ],
66
+ getApiKeyLink: 'https://dashboard.cohere.com/api-keys',
67
  },
68
  {
69
  name: 'OpenRouter',
 
72
  {
73
  name: 'anthropic/claude-3.5-sonnet',
74
  label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',
75
+ provider: 'OpenRouter',
76
+ maxTokenAllowed: 8000,
77
+ },
78
+ {
79
+ name: 'anthropic/claude-3-haiku',
80
+ label: 'Anthropic: Claude 3 Haiku (OpenRouter)',
81
+ provider: 'OpenRouter',
82
+ maxTokenAllowed: 8000,
83
+ },
84
+ {
85
+ name: 'deepseek/deepseek-coder',
86
+ label: 'Deepseek-Coder V2 236B (OpenRouter)',
87
+ provider: 'OpenRouter',
88
+ maxTokenAllowed: 8000,
89
+ },
90
+ {
91
+ name: 'google/gemini-flash-1.5',
92
+ label: 'Google Gemini Flash 1.5 (OpenRouter)',
93
+ provider: 'OpenRouter',
94
+ maxTokenAllowed: 8000,
95
+ },
96
+ {
97
+ name: 'google/gemini-pro-1.5',
98
+ label: 'Google Gemini Pro 1.5 (OpenRouter)',
99
+ provider: 'OpenRouter',
100
+ maxTokenAllowed: 8000,
101
  },
 
102
  { name: 'x-ai/grok-beta', label: 'xAI Grok Beta (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
103
+ {
104
+ name: 'mistralai/mistral-nemo',
105
+ label: 'OpenRouter Mistral Nemo (OpenRouter)',
106
+ provider: 'OpenRouter',
107
+ maxTokenAllowed: 8000,
108
+ },
109
+ {
110
+ name: 'qwen/qwen-110b-chat',
111
+ label: 'OpenRouter Qwen 110b Chat (OpenRouter)',
112
+ provider: 'OpenRouter',
113
+ maxTokenAllowed: 8000,
114
+ },
115
+ { name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 4096 },
116
  ],
117
  getDynamicModels: getOpenRouterModels,
118
  getApiKeyLink: 'https://openrouter.ai/settings/keys',
119
+ },
120
+ {
121
  name: 'Google',
122
  staticModels: [
123
  { name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google', maxTokenAllowed: 8192 },
 
125
  { name: 'gemini-1.5-flash-8b', label: 'Gemini 1.5 Flash-8b', provider: 'Google', maxTokenAllowed: 8192 },
126
  { name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google', maxTokenAllowed: 8192 },
127
  { name: 'gemini-1.5-pro-002', label: 'Gemini 1.5 Pro-002', provider: 'Google', maxTokenAllowed: 8192 },
128
+ { name: 'gemini-exp-1114', label: 'Gemini exp-1114', provider: 'Google', maxTokenAllowed: 8192 },
129
  ],
130
+ getApiKeyLink: 'https://aistudio.google.com/app/apikey',
131
+ },
132
+ {
133
  name: 'Groq',
134
  staticModels: [
135
  { name: 'llama-3.1-70b-versatile', label: 'Llama 3.1 70b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
136
  { name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
137
  { name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
138
  { name: 'llama-3.2-3b-preview', label: 'Llama 3.2 3b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
139
+ { name: 'llama-3.2-1b-preview', label: 'Llama 3.2 1b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
140
  ],
141
+ getApiKeyLink: 'https://console.groq.com/keys',
142
  },
143
  {
144
  name: 'HuggingFace',
145
  staticModels: [
146
+ {
147
+ name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
148
+ label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
149
+ provider: 'HuggingFace',
150
+ maxTokenAllowed: 8000,
151
+ },
152
+ {
153
+ name: '01-ai/Yi-1.5-34B-Chat',
154
+ label: 'Yi-1.5-34B-Chat (HuggingFace)',
155
+ provider: 'HuggingFace',
156
+ maxTokenAllowed: 8000,
157
+ },
158
+ {
159
+ name: 'codellama/CodeLlama-34b-Instruct-hf',
160
+ label: 'CodeLlama-34b-Instruct (HuggingFace)',
161
+ provider: 'HuggingFace',
162
+ maxTokenAllowed: 8000,
163
+ },
164
+ {
165
+ name: 'NousResearch/Hermes-3-Llama-3.1-8B',
166
+ label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
167
+ provider: 'HuggingFace',
168
+ maxTokenAllowed: 8000,
169
+ },
170
  ],
171
+ getApiKeyLink: 'https://huggingface.co/settings/tokens',
172
  },
173
 
174
  {
 
177
  { name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
178
  { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
179
  { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },
180
+ { name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
181
  ],
182
+ getApiKeyLink: 'https://platform.openai.com/api-keys',
183
+ },
184
+ {
185
  name: 'xAI',
186
+ staticModels: [{ name: 'grok-beta', label: 'xAI Grok Beta', provider: 'xAI', maxTokenAllowed: 8000 }],
187
+ getApiKeyLink: 'https://docs.x.ai/docs/quickstart#creating-an-api-key',
188
+ },
189
+ {
 
190
  name: 'Deepseek',
191
  staticModels: [
192
  { name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek', maxTokenAllowed: 8000 },
193
+ { name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek', maxTokenAllowed: 8000 },
194
  ],
195
+ getApiKeyLink: 'https://platform.deepseek.com/api_keys',
196
+ },
197
+ {
198
  name: 'Mistral',
199
  staticModels: [
200
  { name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral', maxTokenAllowed: 8000 },
 
205
  { name: 'ministral-8b-latest', label: 'Mistral 8B', provider: 'Mistral', maxTokenAllowed: 8000 },
206
  { name: 'mistral-small-latest', label: 'Mistral Small', provider: 'Mistral', maxTokenAllowed: 8000 },
207
  { name: 'codestral-latest', label: 'Codestral', provider: 'Mistral', maxTokenAllowed: 8000 },
208
+ { name: 'mistral-large-latest', label: 'Mistral Large Latest', provider: 'Mistral', maxTokenAllowed: 8000 },
209
  ],
210
+ getApiKeyLink: 'https://console.mistral.ai/api-keys/',
211
+ },
212
+ {
213
  name: 'LMStudio',
214
  staticModels: [],
215
  getDynamicModels: getLMStudioModels,
216
  getApiKeyLink: 'https://lmstudio.ai/',
217
  labelForGetApiKey: 'Get LMStudio',
218
+ icon: 'i-ph:cloud-arrow-down',
219
+ },
220
  ];
221
 
222
  export const DEFAULT_PROVIDER = PROVIDER_LIST[0];
223
 
224
+ const staticModels: ModelInfo[] = PROVIDER_LIST.map((p) => p.staticModels).flat();
225
 
226
  export let MODEL_LIST: ModelInfo[] = [...staticModels];
227
 
228
  const getOllamaBaseUrl = () => {
229
  const defaultBaseUrl = import.meta.env.OLLAMA_API_BASE_URL || 'http://localhost:11434';
230
+
231
  // Check if we're in the browser
232
  if (typeof window !== 'undefined') {
233
  // Frontend always uses localhost
 
237
  // Backend: Check if we're running in Docker
238
  const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
239
 
240
+ return isDocker ? defaultBaseUrl.replace('localhost', 'host.docker.internal') : defaultBaseUrl;
 
241
  };
242
 
243
  async function getOllamaModels(): Promise<ModelInfo[]> {
244
  try {
245
  const base_url = getOllamaBaseUrl();
246
  const response = await fetch(`${base_url}/api/tags`);
247
+ const data = (await response.json()) as OllamaApiResponse;
248
 
249
  return data.models.map((model: OllamaModel) => ({
250
  name: model.name,
251
  label: `${model.name} (${model.details.parameter_size})`,
252
  provider: 'Ollama',
253
+ maxTokenAllowed: 8000,
254
  }));
255
  } catch (e) {
256
  return [];
 
260
  async function getOpenAILikeModels(): Promise<ModelInfo[]> {
261
  try {
262
  const base_url = import.meta.env.OPENAI_LIKE_API_BASE_URL || '';
263
+
264
  if (!base_url) {
265
  return [];
266
  }
267
+
268
  const api_key = import.meta.env.OPENAI_LIKE_API_KEY ?? '';
269
  const response = await fetch(`${base_url}/models`, {
270
  headers: {
271
+ Authorization: `Bearer ${api_key}`,
272
+ },
273
  });
274
+ const res = (await response.json()) as any;
275
+
276
  return res.data.map((model: any) => ({
277
  name: model.id,
278
  label: model.id,
279
+ provider: 'OpenAILike',
280
  }));
281
  } catch (e) {
282
  return [];
 
291
  pricing: {
292
  prompt: number;
293
  completion: number;
294
+ };
295
+ }[];
296
  };
297
 
298
  async function getOpenRouterModels(): Promise<ModelInfo[]> {
299
+ const data: OpenRouterModelsResponse = await (
300
+ await fetch('https://openrouter.ai/api/v1/models', {
301
+ headers: {
302
+ 'Content-Type': 'application/json',
303
+ },
304
+ })
305
+ ).json();
306
 
307
+ return data.data
308
+ .sort((a, b) => a.name.localeCompare(b.name))
309
+ .map((m) => ({
310
+ name: m.id,
311
+ label: `${m.name} - in:$${(m.pricing.prompt * 1_000_000).toFixed(
312
+ 2,
313
+ )} out:$${(m.pricing.completion * 1_000_000).toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
314
+ provider: 'OpenRouter',
315
+ maxTokenAllowed: 8000,
316
+ }));
317
  }
318
 
319
  async function getLMStudioModels(): Promise<ModelInfo[]> {
320
  try {
321
  const base_url = import.meta.env.LMSTUDIO_API_BASE_URL || 'http://localhost:1234';
322
  const response = await fetch(`${base_url}/v1/models`);
323
+ const data = (await response.json()) as any;
324
+
325
  return data.data.map((model: any) => ({
326
  name: model.id,
327
  label: model.id,
328
+ provider: 'LMStudio',
329
  }));
330
  } catch (e) {
331
  return [];
332
  }
333
  }
334
 
 
335
  async function initializeModelList(): Promise<ModelInfo[]> {
336
+ MODEL_LIST = [
337
+ ...(
338
+ await Promise.all(
339
+ PROVIDER_LIST.filter(
340
+ (p): p is ProviderInfo & { getDynamicModels: () => Promise<ModelInfo[]> } => !!p.getDynamicModels,
341
+ ).map((p) => p.getDynamicModels()),
342
+ )
343
+ ).flat(),
344
+ ...staticModels,
345
+ ];
346
  return MODEL_LIST;
347
  }
348
 
349
+ export {
350
+ getOllamaModels,
351
+ getOpenAILikeModels,
352
+ getLMStudioModels,
353
+ initializeModelList,
354
+ getOpenRouterModels,
355
+ PROVIDER_LIST,
356
+ };
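
Reviewer note: the reshaped `initializeModelList` fetches every provider's `getDynamicModels()` in parallel and places the results ahead of the static models in `MODEL_LIST`. A minimal consumer sketch, assuming an import path that is not shown in this diff:

```ts
// Hypothetical import path - adjust to wherever this constants module lives.
import { initializeModelList, DEFAULT_PROVIDER } from './constants';

async function modelsFor(providerName: string) {
  // Resolves Ollama, OpenAILike, OpenRouter and LMStudio dynamically,
  // then appends the static models and returns the merged list.
  const models = await initializeModelList();
  return models.filter((m) => m.provider === providerName);
}

modelsFor(DEFAULT_PROVIDER.name).then((models) => console.log(models.map((m) => m.label)));
```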
app/utils/logger.ts CHANGED
@@ -11,7 +11,7 @@ interface Logger {
11
  setLevel: (level: DebugLevel) => void;
12
  }
13
 
14
- let currentLevel: DebugLevel = import.meta.env.VITE_LOG_LEVEL ?? import.meta.env.DEV ? 'debug' : 'info';
15
 
16
  const isWorker = 'HTMLRewriter' in globalThis;
17
  const supportsColor = !isWorker;
 
11
  setLevel: (level: DebugLevel) => void;
12
  }
13
 
14
+ let currentLevel: DebugLevel = (import.meta.env.VITE_LOG_LEVEL ?? import.meta.env.DEV) ? 'debug' : 'info';
15
 
16
  const isWorker = 'HTMLRewriter' in globalThis;
17
  const supportsColor = !isWorker;
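
Reviewer note: the added parentheses in `currentLevel` only spell out the grouping JavaScript already applies (the conditional operator binds looser than `??`), so runtime behavior is unchanged. They do preserve the pre-existing quirk that any truthy `VITE_LOG_LEVEL` selects `'debug'`:

```ts
// What the commit keeps: truthy VITE_LOG_LEVEL or DEV yields 'debug', else 'info'.
const asCommitted = (import.meta.env.VITE_LOG_LEVEL ?? import.meta.env.DEV) ? 'debug' : 'info';

// Hypothetical alternative, only if VITE_LOG_LEVEL were meant to name the level itself
// (an assumption about intent, not what this commit does):
const ifLevelNamed = import.meta.env.VITE_LOG_LEVEL ?? (import.meta.env.DEV ? 'debug' : 'info');
```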
app/utils/shell.ts CHANGED
@@ -52,66 +52,70 @@ export async function newShellProcess(webcontainer: WebContainer, terminal: ITer
52
  return process;
53
  }
54
 
55
-
56
-
57
  export class BoltShell {
58
- #initialized: (() => void) | undefined
59
- #readyPromise: Promise<void>
60
- #webcontainer: WebContainer | undefined
61
- #terminal: ITerminal | undefined
62
- #process: WebContainerProcess | undefined
63
- executionState = atom<{ sessionId: string, active: boolean, executionPrms?: Promise<any> } | undefined>()
64
- #outputStream: ReadableStreamDefaultReader<string> | undefined
65
- #shellInputStream: WritableStreamDefaultWriter<string> | undefined
66
  constructor() {
67
  this.#readyPromise = new Promise((resolve) => {
68
- this.#initialized = resolve
69
- })
70
  }
71
  ready() {
72
  return this.#readyPromise;
73
  }
74
  async init(webcontainer: WebContainer, terminal: ITerminal) {
75
- this.#webcontainer = webcontainer
76
- this.#terminal = terminal
77
- let callback = (data: string) => {
78
- console.log(data)
79
- }
80
- let { process, output } = await this.newBoltShellProcess(webcontainer, terminal)
81
- this.#process = process
82
- this.#outputStream = output.getReader()
83
- await this.waitTillOscCode('interactive')
84
- this.#initialized?.()
85
  }
86
  get terminal() {
87
- return this.#terminal
88
  }
89
  get process() {
90
- return this.#process
91
  }
92
  async executeCommand(sessionId: string, command: string) {
93
  if (!this.process || !this.terminal) {
94
- return
95
  }
96
- let state = this.executionState.get()
97
 
98
- //interrupt the current execution
99
- // this.#shellInputStream?.write('\x03');
 
 
  this.terminal.input('\x03');
 
101
  if (state && state.executionPrms) {
102
- await state.executionPrms
103
  }
 
104
  //start a new execution
105
  this.terminal.input(command.trim() + '\n');
106
 
107
  //wait for the execution to finish
108
- let executionPrms = this.getCurrentExecutionResult()
109
- this.executionState.set({ sessionId, active: true, executionPrms })
110
 
111
- let resp = await executionPrms
112
- this.executionState.set({ sessionId, active: false })
113
- return resp
114
 
 
115
  }
116
  async newBoltShellProcess(webcontainer: WebContainer, terminal: ITerminal) {
117
  const args: string[] = [];
@@ -126,6 +130,7 @@ export class BoltShell {
126
 
127
  const input = process.input.getWriter();
128
  this.#shellInputStream = input;
 
129
  const [internalOutput, terminalOutput] = process.output.tee();
130
 
131
  const jshReady = withResolvers<void>();
@@ -163,30 +168,41 @@ export class BoltShell {
163
  return { process, output: internalOutput };
164
  }
165
  async getCurrentExecutionResult() {
166
- let { output, exitCode } = await this.waitTillOscCode('exit')
167
  return { output, exitCode };
168
  }
169
  async waitTillOscCode(waitCode: string) {
170
  let fullOutput = '';
171
  let exitCode: number = 0;
172
- if (!this.#outputStream) return { output: fullOutput, exitCode };
173
- let tappedStream = this.#outputStream
 
174
 
175
  while (true) {
176
  const { value, done } = await tappedStream.read();
177
- if (done) break;
 
178
  const text = value || '';
179
  fullOutput += text;
180
 
181
  // Check if command completion signal with exit code
182
  const [, osc, , pid, code] = text.match(/\x1b\]654;([^\x07=]+)=?((-?\d+):(\d+))?\x07/) || [];
 
183
  if (osc === 'exit') {
184
  exitCode = parseInt(code, 10);
185
  }
 
186
  if (osc === waitCode) {
187
  break;
188
  }
189
  }
 
190
  return { output: fullOutput, exitCode };
191
  }
192
  }
 
52
  return process;
53
  }
54
 
 
55
  export class BoltShell {
56
+ #initialized: (() => void) | undefined;
57
+ #readyPromise: Promise<void>;
58
+ #webcontainer: WebContainer | undefined;
59
+ #terminal: ITerminal | undefined;
60
+ #process: WebContainerProcess | undefined;
61
+ executionState = atom<{ sessionId: string; active: boolean; executionPrms?: Promise<any> } | undefined>();
62
+ #outputStream: ReadableStreamDefaultReader<string> | undefined;
63
+ #shellInputStream: WritableStreamDefaultWriter<string> | undefined;
64
  constructor() {
65
  this.#readyPromise = new Promise((resolve) => {
66
+ this.#initialized = resolve;
67
+ });
68
  }
69
  ready() {
70
  return this.#readyPromise;
71
  }
72
  async init(webcontainer: WebContainer, terminal: ITerminal) {
73
+ this.#webcontainer = webcontainer;
74
+ this.#terminal = terminal;
75
+
76
+ const callback = (data: string) => {
77
+ console.log(data);
78
+ };
79
+ const { process, output } = await this.newBoltShellProcess(webcontainer, terminal);
80
+ this.#process = process;
81
+ this.#outputStream = output.getReader();
82
+ await this.waitTillOscCode('interactive');
83
+ this.#initialized?.();
84
  }
85
  get terminal() {
86
+ return this.#terminal;
87
  }
88
  get process() {
89
+ return this.#process;
90
  }
91
  async executeCommand(sessionId: string, command: string) {
92
  if (!this.process || !this.terminal) {
93
+ return;
94
  }
 
95
 
96
+ const state = this.executionState.get();
97
+
98
+ /*
99
+ * interrupt the current execution
100
+ * this.#shellInputStream?.write('\x03');
101
+ */
102
  this.terminal.input('\x03');
103
+
104
  if (state && state.executionPrms) {
105
+ await state.executionPrms;
106
  }
107
+
108
  //start a new execution
109
  this.terminal.input(command.trim() + '\n');
110
 
111
  //wait for the execution to finish
112
+ const executionPrms = this.getCurrentExecutionResult();
113
+ this.executionState.set({ sessionId, active: true, executionPrms });
114
 
115
+ const resp = await executionPrms;
116
+ this.executionState.set({ sessionId, active: false });
 
117
 
118
+ return resp;
119
  }
120
  async newBoltShellProcess(webcontainer: WebContainer, terminal: ITerminal) {
121
  const args: string[] = [];
 
130
 
131
  const input = process.input.getWriter();
132
  this.#shellInputStream = input;
133
+
134
  const [internalOutput, terminalOutput] = process.output.tee();
135
 
136
  const jshReady = withResolvers<void>();
 
168
  return { process, output: internalOutput };
169
  }
170
  async getCurrentExecutionResult() {
171
+ const { output, exitCode } = await this.waitTillOscCode('exit');
172
  return { output, exitCode };
173
  }
174
  async waitTillOscCode(waitCode: string) {
175
  let fullOutput = '';
176
  let exitCode: number = 0;
177
+
178
+ if (!this.#outputStream) {
179
+ return { output: fullOutput, exitCode };
180
+ }
181
+
182
+ const tappedStream = this.#outputStream;
183
 
184
  while (true) {
185
  const { value, done } = await tappedStream.read();
186
+
187
+ if (done) {
188
+ break;
189
+ }
190
+
191
  const text = value || '';
192
  fullOutput += text;
193
 
194
  // Check if command completion signal with exit code
195
  const [, osc, , pid, code] = text.match(/\x1b\]654;([^\x07=]+)=?((-?\d+):(\d+))?\x07/) || [];
196
+
197
  if (osc === 'exit') {
198
  exitCode = parseInt(code, 10);
199
  }
200
+
201
  if (osc === waitCode) {
202
  break;
203
  }
204
  }
205
+
206
  return { output: fullOutput, exitCode };
207
  }
208
  }
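
Reviewer note: `waitTillOscCode` reads the teed shell output until it sees an OSC 654 sequence whose name matches `waitCode`. The payload shape below is inferred from the regex alone (`name=pid:code`), not from separate documentation:

```ts
// Hypothetical sample of the completion signal matched inside waitTillOscCode().
const sample = 'build ok\x1b]654;exit=42:0\x07';
const [, osc, , pid, code] = sample.match(/\x1b\]654;([^\x07=]+)=?((-?\d+):(\d+))?\x07/) || [];

console.log(osc); // 'exit', compared against waitCode
console.log(pid); // '42', process id segment (currently unused)
console.log(code); // '0', parsed via parseInt(code, 10) as the exit code
```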
app/utils/types.ts CHANGED
@@ -1,4 +1,3 @@
1
-
2
  interface OllamaModelDetails {
3
  parent_model: string;
4
  format: string;
@@ -29,10 +28,10 @@ export interface ModelInfo {
29
  }
30
 
31
  export interface ProviderInfo {
32
- staticModels: ModelInfo[],
33
- name: string,
34
- getDynamicModels?: () => Promise<ModelInfo[]>,
35
- getApiKeyLink?: string,
36
- labelForGetApiKey?: string,
37
- icon?:string,
38
- };
 
1
  interface OllamaModelDetails {
2
  parent_model: string;
3
  format: string;
 
28
  }
29
 
30
  export interface ProviderInfo {
31
+ staticModels: ModelInfo[];
32
+ name: string;
33
+ getDynamicModels?: () => Promise<ModelInfo[]>;
34
+ getApiKeyLink?: string;
35
+ labelForGetApiKey?: string;
36
+ icon?: string;
37
+ }
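
Reviewer note: besides switching the interface members from commas to semicolons, this hunk drops the stray `;` after the interface's closing brace (a no-op empty statement). Call sites that need the optional `getDynamicModels` can narrow with the same type guard `initializeModelList` uses above:

```ts
// Type guard mirroring the filter in initializeModelList().
const dynamicProviders = PROVIDER_LIST.filter(
  (p): p is ProviderInfo & { getDynamicModels: () => Promise<ModelInfo[]> } => !!p.getDynamicModels,
);
```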