Commit 7fc8e40 · unverified · committed by Eduards · 2 parents: 753f0b3 4589014

Merge pull request #367 from mrsimpson/linting

app/components/chat/APIKeyManager.tsx CHANGED
@@ -10,6 +10,7 @@ interface APIKeyManagerProps {
   labelForGetApiKey?: string;
 }
 
+// eslint-disable-next-line @typescript-eslint/naming-convention
 export const APIKeyManager: React.FC<APIKeyManagerProps> = ({ provider, apiKey, setApiKey }) => {
   const [isEditing, setIsEditing] = useState(false);
   const [tempKey, setTempKey] = useState(apiKey);
app/components/chat/BaseChat.tsx CHANGED
@@ -1,5 +1,7 @@
-// @ts-nocheck
-// Preventing TS checks with files presented in the video for a better presentation.
+/*
+ * @ts-nocheck
+ * Preventing TS checks with files presented in the video for a better presentation.
+ */
 import type { Message } from 'ai';
 import React, { type RefCallback, useEffect } from 'react';
 import { ClientOnly } from 'remix-utils/client-only';
@@ -7,7 +9,7 @@ import { Menu } from '~/components/sidebar/Menu.client';
 import { IconButton } from '~/components/ui/IconButton';
 import { Workbench } from '~/components/workbench/Workbench.client';
 import { classNames } from '~/utils/classNames';
-import { MODEL_LIST, DEFAULT_PROVIDER, PROVIDER_LIST, initializeModelList } from '~/utils/constants';
+import { MODEL_LIST, PROVIDER_LIST, initializeModelList } from '~/utils/constants';
 import { Messages } from './Messages.client';
 import { SendButton } from './SendButton.client';
 import { useState } from 'react';
@@ -25,21 +27,25 @@ const EXAMPLE_PROMPTS = [
   { text: 'How do I center a div?' },
 ];
 
+// eslint-disable-next-line @typescript-eslint/no-unused-vars
 const providerList = PROVIDER_LIST;
 
+// @ts-ignore TODO: Introduce proper types
+// eslint-disable-next-line @typescript-eslint/no-unused-vars
 const ModelSelector = ({ model, setModel, provider, setProvider, modelList, providerList, apiKeys }) => {
   return (
     <div className="mb-2 flex gap-2 flex-col sm:flex-row">
       <select
         value={provider?.name}
         onChange={(e) => {
-          setProvider(providerList.find((p) => p.name === e.target.value));
+          setProvider(providerList.find((p: ProviderInfo) => p.name === e.target.value));
+
           const firstModel = [...modelList].find((m) => m.provider == e.target.value);
           setModel(firstModel ? firstModel.name : '');
         }}
         className="flex-1 p-2 rounded-lg border border-bolt-elements-borderColor bg-bolt-elements-prompt-background text-bolt-elements-textPrimary focus:outline-none focus:ring-2 focus:ring-bolt-elements-focus transition-all"
       >
-        {providerList.map((provider) => (
+        {providerList.map((provider: ProviderInfo) => (
           <option key={provider.name} value={provider.name}>
             {provider.name}
           </option>
@@ -118,14 +124,17 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
       // Load API keys from cookies on component mount
       try {
         const storedApiKeys = Cookies.get('apiKeys');
+
         if (storedApiKeys) {
           const parsedKeys = JSON.parse(storedApiKeys);
+
           if (typeof parsedKeys === 'object' && parsedKeys !== null) {
             setApiKeys(parsedKeys);
           }
         }
       } catch (error) {
         console.error('Error loading API keys from cookies:', error);
+
         // Clear invalid cookie data
         Cookies.remove('apiKeys');
       }
@@ -139,6 +148,7 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
       try {
         const updatedApiKeys = { ...apiKeys, [provider]: key };
         setApiKeys(updatedApiKeys);
+
         // Save updated API keys to cookies with 30 day expiry and secure settings
         Cookies.set('apiKeys', JSON.stringify(updatedApiKeys), {
           expires: 30, // 30 days
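
Note on the cookie persistence above: the hunk truncates right after `expires: 30`, so the remaining "secure settings" are not visible here. A minimal sketch of the save/load round trip with `js-cookie`, using hypothetical key values and assumed `secure`/`sameSite` options:

```ts
import Cookies from 'js-cookie';

// Save: serialize the provider -> key map with a 30 day expiry.
// The secure/sameSite options are assumptions; the hunk cuts off before them.
const apiKeys = { OpenAI: 'sk-test' }; // hypothetical value
Cookies.set('apiKeys', JSON.stringify(apiKeys), { expires: 30, secure: true, sameSite: 'strict' });

// Load: parse defensively, since the cookie may be absent or corrupted.
try {
  const stored = Cookies.get('apiKeys');
  const parsed = stored ? JSON.parse(stored) : null;

  if (typeof parsed === 'object' && parsed !== null) {
    console.log('restored keys for:', Object.keys(parsed));
  }
} catch {
  Cookies.remove('apiKeys'); // clear invalid cookie data, as the component does
}
```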
app/components/chat/Chat.client.tsx CHANGED
@@ -1,5 +1,7 @@
-// @ts-nocheck
-// Preventing TS checks with files presented in the video for a better presentation.
+/*
+ * @ts-nocheck
+ * Preventing TS checks with files presented in the video for a better presentation.
+ */
 import { useStore } from '@nanostores/react';
 import type { Message } from 'ai';
 import { useChat } from 'ai/react';
@@ -81,7 +83,7 @@ export const ChatImpl = memo(({ initialMessages, storeMessageHistory }: ChatProp
   });
   const [provider, setProvider] = useState(() => {
     const savedProvider = Cookies.get('selectedProvider');
-    return PROVIDER_LIST.find(p => p.name === savedProvider) || DEFAULT_PROVIDER;
+    return PROVIDER_LIST.find((p) => p.name === savedProvider) || DEFAULT_PROVIDER;
   });
 
   const { showChat } = useStore(chatStore);
@@ -93,11 +95,13 @@ export const ChatImpl = memo(({ initialMessages, storeMessageHistory }: ChatProp
   const { messages, isLoading, input, handleInputChange, setInput, stop, append } = useChat({
     api: '/api/chat',
     body: {
-      apiKeys
+      apiKeys,
     },
     onError: (error) => {
       logger.error('Request failed\n\n', error);
-      toast.error('There was an error processing your request: ' + (error.message ? error.message : "No details were returned"));
+      toast.error(
+        'There was an error processing your request: ' + (error.message ? error.message : 'No details were returned'),
+      );
     },
     onFinish: () => {
       logger.debug('Finished streaming');
@@ -218,6 +222,7 @@ export const ChatImpl = memo(({ initialMessages, storeMessageHistory }: ChatProp
 
   useEffect(() => {
     const storedApiKeys = Cookies.get('apiKeys');
+
     if (storedApiKeys) {
       setApiKeys(JSON.parse(storedApiKeys));
     }
@@ -271,7 +276,7 @@ export const ChatImpl = memo(({ initialMessages, storeMessageHistory }: ChatProp
                 },
                 model,
                 provider,
-                apiKeys
+                apiKeys,
               );
             }}
           />
app/components/chat/UserMessage.tsx CHANGED
@@ -1,5 +1,7 @@
-// @ts-nocheck
-// Preventing TS checks with files presented in the video for a better presentation.
+/*
+ * @ts-nocheck
+ * Preventing TS checks with files presented in the video for a better presentation.
+ */
 import { modificationsRegex } from '~/utils/diff';
 import { MODEL_REGEX, PROVIDER_REGEX } from '~/utils/constants';
 import { Markdown } from './Markdown';
@@ -17,5 +19,9 @@ export function UserMessage({ content }: UserMessageProps) {
 }
 
 function sanitizeUserMessage(content: string) {
-  return content.replace(modificationsRegex, '').replace(MODEL_REGEX, 'Using: $1').replace(PROVIDER_REGEX, ' ($1)\n\n').trim();
+  return content
+    .replace(modificationsRegex, '')
+    .replace(MODEL_REGEX, 'Using: $1')
+    .replace(PROVIDER_REGEX, ' ($1)\n\n')
+    .trim();
 }
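
For intuition on the chained `replace` calls in `sanitizeUserMessage`: the real `modificationsRegex`, `MODEL_REGEX`, and `PROVIDER_REGEX` are imported from `~/utils/diff` and `~/utils/constants` and are not shown in this diff, so the stand-in patterns below are assumptions made only to keep the example runnable:

```ts
// Stand-in patterns; the real regexes are imported and not part of this diff.
const MODEL_REGEX = /\[Model: (.*?)\]\n\n/;
const PROVIDER_REGEX = /\[Provider: (.*?)\]\n\n/;

function sanitizeUserMessage(content: string) {
  // Each replace rewrites one annotation; $1 is the captured name.
  return content.replace(MODEL_REGEX, 'Using: $1').replace(PROVIDER_REGEX, ' ($1)\n\n').trim();
}

// sanitizeUserMessage('[Model: gpt-4o-mini]\n\n[Provider: OpenAI]\n\nhello')
// -> 'Using: gpt-4o-mini (OpenAI)\n\nhello'
```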
app/components/sidebar/Menu.client.tsx CHANGED
@@ -2,7 +2,6 @@ import { motion, type Variants } from 'framer-motion';
 import { useCallback, useEffect, useRef, useState } from 'react';
 import { toast } from 'react-toastify';
 import { Dialog, DialogButton, DialogDescription, DialogRoot, DialogTitle } from '~/components/ui/Dialog';
-import { IconButton } from '~/components/ui/IconButton';
 import { ThemeSwitch } from '~/components/ui/ThemeSwitch';
 import { db, deleteById, getAll, chatId, type ChatHistoryItem, useChatHistory } from '~/lib/persistence';
 import { cubicEasingFn } from '~/utils/easings';
app/components/workbench/EditorPanel.tsx CHANGED
@@ -255,6 +255,7 @@ export const EditorPanel = memo(
             </div>
             {Array.from({ length: terminalCount + 1 }, (_, index) => {
               const isActive = activeTerminal === index;
+
               if (index == 0) {
                 logger.info('Starting bolt terminal');
 
@@ -273,6 +274,7 @@ export const EditorPanel = memo(
                   />
                 );
               }
+
               return (
                 <Terminal
                   key={index}
app/components/workbench/FileTree.tsx CHANGED
@@ -111,7 +111,7 @@ export const FileTree = memo(
   };
 
   return (
-    <div className={classNames('text-sm', className ,'overflow-y-auto')}>
+    <div className={classNames('text-sm', className, 'overflow-y-auto')}>
       {filteredFileList.map((fileOrFolder) => {
         switch (fileOrFolder.kind) {
           case 'file': {
app/components/workbench/Workbench.client.tsx CHANGED
@@ -174,16 +174,21 @@ export const Workbench = memo(({ chatStarted, isStreaming }: WorkspaceProps) =>
       'Please enter a name for your new GitHub repository:',
       'bolt-generated-project',
     );
+
     if (!repoName) {
       alert('Repository name is required. Push to GitHub cancelled.');
       return;
     }
+
     const githubUsername = prompt('Please enter your GitHub username:');
+
     if (!githubUsername) {
       alert('GitHub username is required. Push to GitHub cancelled.');
       return;
     }
+
     const githubToken = prompt('Please enter your GitHub personal access token:');
+
     if (!githubToken) {
       alert('GitHub token is required. Push to GitHub cancelled.');
       return;
app/lib/.server/llm/api-key.ts CHANGED
@@ -1,5 +1,7 @@
-// @ts-nocheck
-// Preventing TS checks with files presented in the video for a better presentation.
+/*
+ * @ts-nocheck
+ * Preventing TS checks with files presented in the video for a better presentation.
+ */
 import { env } from 'node:process';
 
 export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Record<string, string>) {
@@ -28,17 +30,19 @@ export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Re
     case 'OpenRouter':
       return env.OPEN_ROUTER_API_KEY || cloudflareEnv.OPEN_ROUTER_API_KEY;
     case 'Deepseek':
-      return env.DEEPSEEK_API_KEY || cloudflareEnv.DEEPSEEK_API_KEY
+      return env.DEEPSEEK_API_KEY || cloudflareEnv.DEEPSEEK_API_KEY;
     case 'Mistral':
-      return env.MISTRAL_API_KEY || cloudflareEnv.MISTRAL_API_KEY;
-    case "OpenAILike":
+      return env.MISTRAL_API_KEY || cloudflareEnv.MISTRAL_API_KEY;
+    case 'OpenAILike':
       return env.OPENAI_LIKE_API_KEY || cloudflareEnv.OPENAI_LIKE_API_KEY;
-    case "xAI":
+    case 'xAI':
       return env.XAI_API_KEY || cloudflareEnv.XAI_API_KEY;
-    case "Cohere":
+    case 'Cohere':
       return env.COHERE_API_KEY;
+    case 'AzureOpenAI':
+      return env.AZURE_OPENAI_API_KEY;
     default:
-      return "";
+      return '';
   }
 }
 
@@ -47,14 +51,17 @@ export function getBaseURL(cloudflareEnv: Env, provider: string) {
     case 'OpenAILike':
       return env.OPENAI_LIKE_API_BASE_URL || cloudflareEnv.OPENAI_LIKE_API_BASE_URL;
     case 'LMStudio':
-      return env.LMSTUDIO_API_BASE_URL || cloudflareEnv.LMSTUDIO_API_BASE_URL || "http://localhost:1234";
-    case 'Ollama':
-      let baseUrl = env.OLLAMA_API_BASE_URL || cloudflareEnv.OLLAMA_API_BASE_URL || "http://localhost:11434";
-      if (env.RUNNING_IN_DOCKER === 'true') {
-        baseUrl = baseUrl.replace("localhost", "host.docker.internal");
-      }
-      return baseUrl;
+      return env.LMSTUDIO_API_BASE_URL || cloudflareEnv.LMSTUDIO_API_BASE_URL || 'http://localhost:1234';
+    case 'Ollama': {
+      let baseUrl = env.OLLAMA_API_BASE_URL || cloudflareEnv.OLLAMA_API_BASE_URL || 'http://localhost:11434';
+
+      if (env.RUNNING_IN_DOCKER === 'true') {
+        baseUrl = baseUrl.replace('localhost', 'host.docker.internal');
+      }
+
+      return baseUrl;
+    }
     default:
-      return "";
+      return '';
   }
 }
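
A note on the `Ollama` case above: inside a Docker container, `localhost` points at the container itself rather than the host, so the base URL is rewritten to `host.docker.internal`. A standalone sketch of that resolution logic (the helper name is hypothetical; the env variable names match the diff):

```ts
// Hypothetical standalone helper mirroring the Ollama branch above.
function resolveOllamaBaseUrl(env: Record<string, string | undefined>): string {
  // Fall back to the local default when no explicit base URL is configured.
  let baseUrl = env.OLLAMA_API_BASE_URL ?? 'http://localhost:11434';

  // Inside a container, 'localhost' is the container itself, not the host,
  // so point at the special host alias Docker provides.
  if (env.RUNNING_IN_DOCKER === 'true') {
    baseUrl = baseUrl.replace('localhost', 'host.docker.internal');
  }

  return baseUrl;
}

// resolveOllamaBaseUrl({ RUNNING_IN_DOCKER: 'true' })
// -> 'http://host.docker.internal:11434'
```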
app/lib/.server/llm/model.ts CHANGED
@@ -1,26 +1,29 @@
-// @ts-nocheck
-// Preventing TS checks with files presented in the video for a better presentation.
+/*
+ * @ts-nocheck
+ * Preventing TS checks with files presented in the video for a better presentation.
+ */
 import { getAPIKey, getBaseURL } from '~/lib/.server/llm/api-key';
 import { createAnthropic } from '@ai-sdk/anthropic';
 import { createOpenAI } from '@ai-sdk/openai';
 import { createGoogleGenerativeAI } from '@ai-sdk/google';
 import { ollama } from 'ollama-ai-provider';
-import { createOpenRouter } from "@openrouter/ai-sdk-provider";
+import { createOpenRouter } from '@openrouter/ai-sdk-provider';
 import { createMistral } from '@ai-sdk/mistral';
-import { createCohere } from '@ai-sdk/cohere'
+import { createCohere } from '@ai-sdk/cohere';
+import type { LanguageModelV1 } from 'ai';
 
-export const DEFAULT_NUM_CTX = process.env.DEFAULT_NUM_CTX ?
-  parseInt(process.env.DEFAULT_NUM_CTX, 10) :
-  32768;
+export const DEFAULT_NUM_CTX = process.env.DEFAULT_NUM_CTX ? parseInt(process.env.DEFAULT_NUM_CTX, 10) : 32768;
 
-export function getAnthropicModel(apiKey: string, model: string) {
+type OptionalApiKey = string | undefined;
+
+export function getAnthropicModel(apiKey: OptionalApiKey, model: string) {
   const anthropic = createAnthropic({
     apiKey,
   });
 
   return anthropic(model);
 }
-export function getOpenAILikeModel(baseURL:string,apiKey: string, model: string) {
+export function getOpenAILikeModel(baseURL: string, apiKey: OptionalApiKey, model: string) {
   const openai = createOpenAI({
     baseURL,
     apiKey,
@@ -29,7 +32,7 @@ export function getOpenAILikeModel(baseURL:string,apiKey: string, model: string)
   return openai(model);
 }
 
-export function getCohereAIModel(apiKey:string, model: string){
+export function getCohereAIModel(apiKey: OptionalApiKey, model: string) {
   const cohere = createCohere({
     apiKey,
   });
@@ -37,7 +40,7 @@ export function getCohereAIModel(apiKey:string, model: string){
   return cohere(model);
 }
 
-export function getOpenAIModel(apiKey: string, model: string) {
+export function getOpenAIModel(apiKey: OptionalApiKey, model: string) {
   const openai = createOpenAI({
     apiKey,
   });
@@ -45,15 +48,15 @@ export function getOpenAIModel(apiKey: string, model: string) {
   return openai(model);
 }
 
-export function getMistralModel(apiKey: string, model: string) {
+export function getMistralModel(apiKey: OptionalApiKey, model: string) {
   const mistral = createMistral({
-    apiKey
+    apiKey,
   });
 
   return mistral(model);
 }
 
-export function getGoogleModel(apiKey: string, model: string) {
+export function getGoogleModel(apiKey: OptionalApiKey, model: string) {
   const google = createGoogleGenerativeAI({
     apiKey,
   });
@@ -61,7 +64,7 @@ export function getGoogleModel(apiKey: string, model: string) {
   return google(model);
 }
 
-export function getGroqModel(apiKey: string, model: string) {
+export function getGroqModel(apiKey: OptionalApiKey, model: string) {
   const openai = createOpenAI({
     baseURL: 'https://api.groq.com/openai/v1',
     apiKey,
@@ -70,7 +73,7 @@ export function getGroqModel(apiKey: string, model: string) {
   return openai(model);
 }
 
-export function getHuggingFaceModel(apiKey: string, model: string) {
+export function getHuggingFaceModel(apiKey: OptionalApiKey, model: string) {
   const openai = createOpenAI({
     baseURL: 'https://api-inference.huggingface.co/v1/',
     apiKey,
@@ -80,15 +83,16 @@ export function getHuggingFaceModel(apiKey: string, model: string) {
 }
 
 export function getOllamaModel(baseURL: string, model: string) {
-  let Ollama = ollama(model, {
+  const ollamaInstance = ollama(model, {
     numCtx: DEFAULT_NUM_CTX,
-  });
+  }) as LanguageModelV1 & { config: any };
 
-  Ollama.config.baseURL = `${baseURL}/api`;
-  return Ollama;
+  ollamaInstance.config.baseURL = `${baseURL}/api`;
+
+  return ollamaInstance;
 }
 
-export function getDeepseekModel(apiKey: string, model: string){
+export function getDeepseekModel(apiKey: OptionalApiKey, model: string) {
   const openai = createOpenAI({
     baseURL: 'https://api.deepseek.com/beta',
     apiKey,
@@ -97,9 +101,9 @@ export function getDeepseekModel(apiKey: string, model: string){
   return openai(model);
 }
 
-export function getOpenRouterModel(apiKey: string, model: string) {
+export function getOpenRouterModel(apiKey: OptionalApiKey, model: string) {
   const openRouter = createOpenRouter({
-    apiKey
+    apiKey,
   });
 
   return openRouter.chat(model);
@@ -108,13 +112,13 @@ export function getOpenRouterModel(apiKey: string, model: string) {
 export function getLMStudioModel(baseURL: string, model: string) {
   const lmstudio = createOpenAI({
     baseUrl: `${baseURL}/v1`,
-    apiKey: "",
+    apiKey: '',
   });
 
   return lmstudio(model);
 }
 
-export function getXAIModel(apiKey: string, model: string) {
+export function getXAIModel(apiKey: OptionalApiKey, model: string) {
   const openai = createOpenAI({
     baseURL: 'https://api.x.ai/v1',
     apiKey,
@@ -123,7 +127,6 @@ export function getXAIModel(apiKey: string, model: string) {
   return openai(model);
 }
 
-
 export function getModel(provider: string, model: string, env: Env, apiKeys?: Record<string, string>) {
   const apiKey = getAPIKey(env, provider, apiKeys);
   const baseURL = getBaseURL(env, provider);
@@ -142,11 +145,11 @@ export function getModel(provider: string, model: string, env: Env, apiKeys?: Re
     case 'Google':
       return getGoogleModel(apiKey, model);
     case 'OpenAILike':
-      return getOpenAILikeModel(baseURL,apiKey, model);
+      return getOpenAILikeModel(baseURL, apiKey, model);
     case 'Deepseek':
       return getDeepseekModel(apiKey, model);
     case 'Mistral':
-      return getMistralModel(apiKey, model);
+      return getMistralModel(apiKey, model);
     case 'LMStudio':
       return getLMStudioModel(baseURL, model);
     case 'xAI':
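
One caveat worth knowing about the `DEFAULT_NUM_CTX` one-liner above: `parseInt` returns `NaN` for non-numeric input. The guard below is not part of the diff, just an illustration of a hardened variant:

```ts
// Hypothetical hardened variant: a non-numeric DEFAULT_NUM_CTX (e.g. "abc")
// would otherwise yield NaN and silently break request options downstream.
const raw = process.env.DEFAULT_NUM_CTX;
const parsed = raw ? parseInt(raw, 10) : NaN;
const DEFAULT_NUM_CTX = Number.isFinite(parsed) ? parsed : 32768;
```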
app/lib/.server/llm/stream-text.ts CHANGED
@@ -1,5 +1,6 @@
-// @ts-nocheck
-// Preventing TS checks with files presented in the video for a better presentation.
+// eslint-disable-next-line @typescript-eslint/ban-ts-comment
+// @ts-nocheck TODO: Provider proper types
+
 import { streamText as _streamText, convertToCoreMessages } from 'ai';
 import { getModel } from '~/lib/.server/llm/model';
 import { MAX_TOKENS } from './constants';
@@ -34,19 +35,12 @@ function extractPropertiesFromMessage(message: Message): { model: string; provid
   const provider = providerMatch ? providerMatch[1] : DEFAULT_PROVIDER;
 
   // Remove model and provider lines from content
-  const cleanedContent = message.content
-    .replace(MODEL_REGEX, '')
-    .replace(PROVIDER_REGEX, '')
-    .trim();
+  const cleanedContent = message.content.replace(MODEL_REGEX, '').replace(PROVIDER_REGEX, '').trim();
 
   return { model, provider, content: cleanedContent };
 }
-export function streamText(
-  messages: Messages,
-  env: Env,
-  options?: StreamingOptions,
-  apiKeys?: Record<string, string>
-) {
+
+export function streamText(messages: Messages, env: Env, options?: StreamingOptions, apiKeys?: Record<string, string>) {
   let currentModel = DEFAULT_MODEL;
   let currentProvider = DEFAULT_PROVIDER;
 
@@ -63,17 +57,12 @@ export function streamText(
       return { ...message, content };
     }
 
-    return message;
+    return message;
   });
 
   const modelDetails = MODEL_LIST.find((m) => m.name === currentModel);
 
-
-
-  const dynamicMaxTokens =
-    modelDetails && modelDetails.maxTokenAllowed
-      ? modelDetails.maxTokenAllowed
-      : MAX_TOKENS;
+  const dynamicMaxTokens = modelDetails && modelDetails.maxTokenAllowed ? modelDetails.maxTokenAllowed : MAX_TOKENS;
 
   return _streamText({
     model: getModel(currentProvider, currentModel, env, apiKeys),
app/lib/persistence/db.ts CHANGED
@@ -161,11 +161,17 @@ async function getUrlIds(db: IDBDatabase): Promise<string[]> {
 
 export async function forkChat(db: IDBDatabase, chatId: string, messageId: string): Promise<string> {
   const chat = await getMessages(db, chatId);
-  if (!chat) throw new Error('Chat not found');
+
+  if (!chat) {
+    throw new Error('Chat not found');
+  }
 
   // Find the index of the message to fork at
-  const messageIndex = chat.messages.findIndex(msg => msg.id === messageId);
-  if (messageIndex === -1) throw new Error('Message not found');
+  const messageIndex = chat.messages.findIndex((msg) => msg.id === messageId);
+
+  if (messageIndex === -1) {
+    throw new Error('Message not found');
+  }
 
   // Get messages up to and including the selected message
   const messages = chat.messages.slice(0, messageIndex + 1);
@@ -175,19 +181,14 @@ export async function forkChat(db: IDBDatabase, chatId: string, messageId: strin
   const urlId = await getUrlId(db, newId);
 
   // Create the forked chat
-  await setMessages(
-    db,
-    newId,
-    messages,
-    urlId,
-    chat.description ? `${chat.description} (fork)` : 'Forked chat'
-  );
+  await setMessages(db, newId, messages, urlId, chat.description ? `${chat.description} (fork)` : 'Forked chat');
 
   return urlId;
 }
 
 export async function duplicateChat(db: IDBDatabase, id: string): Promise<string> {
   const chat = await getMessages(db, id);
+
   if (!chat) {
     throw new Error('Chat not found');
   }
@@ -200,7 +201,7 @@ export async function duplicateChat(db: IDBDatabase, id: string): Promise<string
     newId,
     chat.messages,
     newUrlId, // Use the new urlId
-    `${chat.description || 'Chat'} (copy)`
+    `${chat.description || 'Chat'} (copy)`,
   );
 
   return newUrlId; // Return the urlId instead of id for navigation
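
For context on the `slice(0, messageIndex + 1)` call in `forkChat`: `Array.prototype.slice` excludes its end index, so the `+ 1` is what keeps the selected message in the fork. A small sketch with hypothetical data:

```ts
// slice's end index is exclusive, so forking at index 1 keeps messages 0..1.
const messages = [{ id: 'a' }, { id: 'b' }, { id: 'c' }];
const messageIndex = messages.findIndex((msg) => msg.id === 'b'); // 1
const forked = messages.slice(0, messageIndex + 1); // [{ id: 'a' }, { id: 'b' }]
```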
app/lib/persistence/useChatHistory.ts CHANGED
@@ -99,7 +99,7 @@ export function useChatHistory() {
 
       await setMessages(db, chatId.get() as string, messages, urlId, description.get());
     },
-    duplicateCurrentChat: async (listItemId:string) => {
+    duplicateCurrentChat: async (listItemId: string) => {
       if (!db || (!mixedId && !listItemId)) {
         return;
       }
@@ -110,8 +110,9 @@ export function useChatHistory() {
         toast.success('Chat duplicated successfully');
       } catch (error) {
         toast.error('Failed to duplicate chat');
+        console.log(error);
       }
-    }
+    },
   };
 }
 
app/lib/runtime/action-runner.ts CHANGED
@@ -1,11 +1,10 @@
-import { WebContainer, type WebContainerProcess } from '@webcontainer/api';
+import { WebContainer } from '@webcontainer/api';
 import { atom, map, type MapStore } from 'nanostores';
 import * as nodePath from 'node:path';
 import type { BoltAction } from '~/types/actions';
 import { createScopedLogger } from '~/utils/logger';
 import { unreachable } from '~/utils/unreachable';
 import type { ActionCallbackData } from './message-parser';
-import type { ITerminal } from '~/types/terminal';
 import type { BoltShell } from '~/utils/shell';
 
 const logger = createScopedLogger('ActionRunner');
@@ -45,7 +44,6 @@ export class ActionRunner {
   constructor(webcontainerPromise: Promise<WebContainer>, getShellTerminal: () => BoltShell) {
     this.#webcontainer = webcontainerPromise;
     this.#shellTerminal = getShellTerminal;
-
   }
 
   addAction(data: ActionCallbackData) {
@@ -88,19 +86,21 @@ export class ActionRunner {
     if (action.executed) {
       return;
     }
+
     if (isStreaming && action.type !== 'file') {
       return;
     }
 
     this.#updateAction(actionId, { ...action, ...data.action, executed: !isStreaming });
 
-    return this.#currentExecutionPromise = this.#currentExecutionPromise
+    // eslint-disable-next-line consistent-return
+    return (this.#currentExecutionPromise = this.#currentExecutionPromise
       .then(() => {
-        return this.#executeAction(actionId, isStreaming);
+        this.#executeAction(actionId, isStreaming);
       })
       .catch((error) => {
        console.error('Action failed:', error);
-      });
+      }));
   }
 
   async #executeAction(actionId: string, isStreaming: boolean = false) {
@@ -121,17 +121,23 @@ export class ActionRunner {
       case 'start': {
         // making the start app non blocking
 
-        this.#runStartAction(action).then(()=>this.#updateAction(actionId, { status: 'complete' }))
-          .catch(()=>this.#updateAction(actionId, { status: 'failed', error: 'Action failed' }))
-        // adding a delay to avoid any race condition between 2 start actions
-        // i am up for a better approch
-        await new Promise(resolve=>setTimeout(resolve,2000))
-        return
-        break;
+        this.#runStartAction(action)
+          .then(() => this.#updateAction(actionId, { status: 'complete' }))
+          .catch(() => this.#updateAction(actionId, { status: 'failed', error: 'Action failed' }));
+
+        /*
+         * adding a delay to avoid any race condition between 2 start actions
+         * i am up for a better approach
+         */
+        await new Promise((resolve) => setTimeout(resolve, 2000));
+
+        return;
       }
     }
 
-    this.#updateAction(actionId, { status: isStreaming ? 'running' : action.abortSignal.aborted ? 'aborted' : 'complete' });
+    this.#updateAction(actionId, {
+      status: isStreaming ? 'running' : action.abortSignal.aborted ? 'aborted' : 'complete',
+    });
   } catch (error) {
     this.#updateAction(actionId, { status: 'failed', error: 'Action failed' });
     logger.error(`[${action.type}]:Action failed\n\n`, error);
@@ -145,16 +151,19 @@ export class ActionRunner {
     if (action.type !== 'shell') {
       unreachable('Expected shell action');
     }
-    const shell = this.#shellTerminal()
-    await shell.ready()
+
+    const shell = this.#shellTerminal();
+    await shell.ready();
+
     if (!shell || !shell.terminal || !shell.process) {
       unreachable('Shell terminal not found');
     }
-    const resp = await shell.executeCommand(this.runnerId.get(), action.content)
-    logger.debug(`${action.type} Shell Response: [exit code:${resp?.exitCode}]`)
-    if (resp?.exitCode != 0) {
-      throw new Error("Failed To Execute Shell Command");
+
+    const resp = await shell.executeCommand(this.runnerId.get(), action.content);
+    logger.debug(`${action.type} Shell Response: [exit code:${resp?.exitCode}]`);
+
+    if (resp?.exitCode != 0) {
+      throw new Error('Failed To Execute Shell Command');
     }
   }
 
@@ -162,21 +171,26 @@ export class ActionRunner {
    if (action.type !== 'start') {
      unreachable('Expected shell action');
    }
+
    if (!this.#shellTerminal) {
      unreachable('Shell terminal not found');
    }
-    const shell = this.#shellTerminal()
-    await shell.ready()
+
+    const shell = this.#shellTerminal();
+    await shell.ready();
+
    if (!shell || !shell.terminal || !shell.process) {
      unreachable('Shell terminal not found');
    }
-    const resp = await shell.executeCommand(this.runnerId.get(), action.content)
-    logger.debug(`${action.type} Shell Response: [exit code:${resp?.exitCode}]`)
 
+    const resp = await shell.executeCommand(this.runnerId.get(), action.content);
+    logger.debug(`${action.type} Shell Response: [exit code:${resp?.exitCode}]`);
+
    if (resp?.exitCode != 0) {
-      throw new Error("Failed To Start Application");
+      throw new Error('Failed To Start Application');
    }
-    return resp
+
+    return resp;
  }
 
  async #runFileAction(action: ActionState) {
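
The `#currentExecutionPromise` chaining above is what serializes actions: each new action is appended with `.then(...)`, so it starts only after every earlier one has settled, and the trailing `.catch` keeps one failure from wedging the queue. A minimal standalone sketch of the same pattern, using plain Promises:

```ts
// Minimal sketch of promise-chain serialization: tasks run strictly one
// after another, and the catch prevents a rejected task from breaking
// the chain for everything queued behind it.
let queue: Promise<void> = Promise.resolve();

function enqueue(task: () => Promise<void>): Promise<void> {
  queue = queue.then(() => task()).catch((error) => console.error('task failed:', error));
  return queue;
}

enqueue(async () => console.log('first'));
enqueue(async () => console.log('second')); // always logs after 'first'
```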
app/lib/runtime/message-parser.ts CHANGED
@@ -55,7 +55,7 @@ interface MessageState {
 export class StreamingMessageParser {
   #messages = new Map<string, MessageState>();
 
-  constructor(private _options: StreamingMessageParserOptions = {}) { }
+  constructor(private _options: StreamingMessageParserOptions = {}) {}
 
   parse(messageId: string, input: string) {
     let state = this.#messages.get(messageId);
@@ -120,20 +120,20 @@ export class StreamingMessageParser {
           i = closeIndex + ARTIFACT_ACTION_TAG_CLOSE.length;
         } else {
           if ('type' in currentAction && currentAction.type === 'file') {
-            let content = input.slice(i);
+            const content = input.slice(i);
 
             this._options.callbacks?.onActionStream?.({
               artifactId: currentArtifact.id,
               messageId,
               actionId: String(state.actionId - 1),
               action: {
-                ...currentAction as FileAction,
+                ...(currentAction as FileAction),
                 content,
                 filePath: currentAction.filePath,
               },
-
             });
           }
+
           break;
         }
       } else {
@@ -272,7 +272,7 @@ export class StreamingMessageParser {
         }
 
         (actionAttributes as FileAction).filePath = filePath;
-      } else if (!(['shell', 'start'].includes(actionType))) {
+      } else if (!['shell', 'start'].includes(actionType)) {
         logger.warn(`Unknown action type '${actionType}'`);
       }
 
app/lib/stores/terminal.ts CHANGED
@@ -7,7 +7,7 @@ import { coloredText } from '~/utils/terminal';
 export class TerminalStore {
   #webcontainer: Promise<WebContainer>;
   #terminals: Array<{ terminal: ITerminal; process: WebContainerProcess }> = [];
-  #boltTerminal = newBoltShellProcess()
+  #boltTerminal = newBoltShellProcess();
 
   showTerminal: WritableAtom<boolean> = import.meta.hot?.data.showTerminal ?? atom(true);
 
@@ -27,8 +27,8 @@ export class TerminalStore {
   }
   async attachBoltTerminal(terminal: ITerminal) {
     try {
-      let wc = await this.#webcontainer
-      await this.#boltTerminal.init(wc, terminal)
+      const wc = await this.#webcontainer;
+      await this.#boltTerminal.init(wc, terminal);
     } catch (error: any) {
       terminal.write(coloredText.red('Failed to spawn bolt shell\n\n') + error.message);
       return;
app/lib/stores/workbench.ts CHANGED
@@ -11,9 +11,8 @@ import { PreviewsStore } from './previews';
 import { TerminalStore } from './terminal';
 import JSZip from 'jszip';
 import { saveAs } from 'file-saver';
-import { Octokit, type RestEndpointMethodTypes } from "@octokit/rest";
+import { Octokit, type RestEndpointMethodTypes } from '@octokit/rest';
 import * as nodePath from 'node:path';
-import type { WebContainerProcess } from '@webcontainer/api';
 import { extractRelativePath } from '~/utils/diff';
 
 export interface ArtifactState {
@@ -42,8 +41,7 @@ export class WorkbenchStore {
   unsavedFiles: WritableAtom<Set<string>> = import.meta.hot?.data.unsavedFiles ?? atom(new Set<string>());
   modifiedFiles = new Set<string>();
   artifactIdList: string[] = [];
-  #boltTerminal: { terminal: ITerminal; process: WebContainerProcess } | undefined;
-  #globalExecutionQueue=Promise.resolve();
+  #globalExecutionQueue = Promise.resolve();
   constructor() {
     if (import.meta.hot) {
       import.meta.hot.data.artifacts = this.artifacts;
@@ -54,7 +52,7 @@ export class WorkbenchStore {
   }
 
   addToExecutionQueue(callback: () => Promise<void>) {
-    this.#globalExecutionQueue=this.#globalExecutionQueue.then(()=>callback())
+    this.#globalExecutionQueue = this.#globalExecutionQueue.then(() => callback());
   }
 
   get previews() {
@@ -96,7 +94,6 @@ export class WorkbenchStore {
     this.#terminalStore.attachTerminal(terminal);
   }
   attachBoltTerminal(terminal: ITerminal) {
-
     this.#terminalStore.attachBoltTerminal(terminal);
   }
 
@@ -261,7 +258,8 @@ export class WorkbenchStore {
     this.artifacts.setKey(messageId, { ...artifact, ...state });
   }
   addAction(data: ActionCallbackData) {
-    this._addAction(data)
+    this._addAction(data);
+
     // this.addToExecutionQueue(()=>this._addAction(data))
   }
   async _addAction(data: ActionCallbackData) {
@@ -277,11 +275,10 @@ export class WorkbenchStore {
   }
 
   runAction(data: ActionCallbackData, isStreaming: boolean = false) {
-    if(isStreaming) {
-      this._runAction(data, isStreaming)
-    }
-    else{
-      this.addToExecutionQueue(()=>this._runAction(data, isStreaming))
+    if (isStreaming) {
+      this._runAction(data, isStreaming);
+    } else {
+      this.addToExecutionQueue(() => this._runAction(data, isStreaming));
     }
   }
   async _runAction(data: ActionCallbackData, isStreaming: boolean = false) {
@@ -292,16 +289,21 @@ export class WorkbenchStore {
     if (!artifact) {
       unreachable('Artifact not found');
     }
+
     if (data.action.type === 'file') {
-      let wc = await webcontainer
+      const wc = await webcontainer;
       const fullPath = nodePath.join(wc.workdir, data.action.filePath);
+
       if (this.selectedFile.value !== fullPath) {
         this.setSelectedFile(fullPath);
       }
+
       if (this.currentView.value !== 'code') {
         this.currentView.set('code');
       }
+
       const doc = this.#editorStore.documents.get()[fullPath];
+
       if (!doc) {
         await artifact.runner.runAction(data, isStreaming);
       }
@@ -382,7 +384,6 @@ export class WorkbenchStore {
   }
 
   async pushToGitHub(repoName: string, githubUsername: string, ghToken: string) {
-
     try {
       // Get the GitHub auth token from environment variables
       const githubToken = ghToken;
@@ -397,10 +398,11 @@ export class WorkbenchStore {
       const octokit = new Octokit({ auth: githubToken });
 
       // Check if the repository already exists before creating it
-      let repo: RestEndpointMethodTypes["repos"]["get"]["response"]['data']
+      let repo: RestEndpointMethodTypes['repos']['get']['response']['data'];
+
       try {
-        let resp = await octokit.repos.get({ owner: owner, repo: repoName });
-        repo = resp.data
+        const resp = await octokit.repos.get({ owner, repo: repoName });
+        repo = resp.data;
       } catch (error) {
         if (error instanceof Error && 'status' in error && error.status === 404) {
           // Repository doesn't exist, so create a new one
@@ -418,6 +420,7 @@ export class WorkbenchStore {
 
       // Get all files
      const files = this.files.get();
+
      if (!files || Object.keys(files).length === 0) {
        throw new Error('No files found to push');
      }
@@ -434,7 +437,9 @@ export class WorkbenchStore {
          });
          return { path: extractRelativePath(filePath), sha: blob.sha };
        }
-      })
+
+        return null;
+      }),
     );
 
     const validBlobs = blobs.filter(Boolean); // Filter out any undefined blobs
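
On the `octokit.repos.get` try/catch above: probing for a repository and treating a 404 as "create it" is the usual Octokit idiom. A condensed sketch, assuming `@octokit/rest` and hypothetical `owner`/`repoName` values:

```ts
import { Octokit } from '@octokit/rest';

// Hypothetical condensed version of the probe-then-create flow above.
async function getOrCreateRepo(octokit: Octokit, owner: string, repoName: string) {
  try {
    // repos.get rejects with a status of 404 when the repo does not exist.
    const resp = await octokit.repos.get({ owner, repo: repoName });
    return resp.data;
  } catch (error) {
    if (error instanceof Error && 'status' in error && (error as any).status === 404) {
      // Repository doesn't exist, so create a new one.
      const created = await octokit.repos.createForAuthenticatedUser({ name: repoName, private: false });
      return created.data;
    }
    throw error; // anything else is a real failure
  }
}
```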
app/routes/api.chat.ts CHANGED
@@ -1,5 +1,6 @@
-// @ts-nocheck
-// Preventing TS checks with files presented in the video for a better presentation.
+// eslint-disable-next-line @typescript-eslint/ban-ts-comment
+// @ts-nocheck TODO: Provider proper types
+
 import { type ActionFunctionArgs } from '@remix-run/cloudflare';
 import { MAX_RESPONSE_SEGMENTS, MAX_TOKENS } from '~/lib/.server/llm/constants';
 import { CONTINUE_PROMPT } from '~/lib/.server/llm/prompts';
@@ -14,14 +15,15 @@ function parseCookies(cookieHeader) {
   const cookies = {};
 
   // Split the cookie string by semicolons and spaces
-  const items = cookieHeader.split(";").map(cookie => cookie.trim());
+  const items = cookieHeader.split(';').map((cookie) => cookie.trim());
+
+  items.forEach((item) => {
+    const [name, ...rest] = item.split('=');
 
-  items.forEach(item => {
-    const [name, ...rest] = item.split("=");
     if (name && rest) {
       // Decode the name and value, and join value parts in case it contains '='
       const decodedName = decodeURIComponent(name.trim());
-      const decodedValue = decodeURIComponent(rest.join("=").trim());
+      const decodedValue = decodeURIComponent(rest.join('=').trim());
       cookies[decodedName] = decodedValue;
     }
   });
@@ -31,13 +33,13 @@ function parseCookies(cookieHeader) {
 
 async function chatAction({ context, request }: ActionFunctionArgs) {
   const { messages } = await request.json<{
-    messages: Messages
+    messages: Messages;
   }>();
 
-  const cookieHeader = request.headers.get("Cookie");
+  const cookieHeader = request.headers.get('Cookie');
 
   // Parse the cookie's value (returns an object or null if no cookie exists)
-  const apiKeys = JSON.parse(parseCookies(cookieHeader).apiKeys || "{}");
+  const apiKeys = JSON.parse(parseCookies(cookieHeader).apiKeys || '{}');
 
   const stream = new SwitchableStream();
 
@@ -83,7 +85,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
       if (error.message?.includes('API key')) {
         throw new Response('Invalid or missing API key', {
           status: 401,
-          statusText: 'Unauthorized'
+          statusText: 'Unauthorized',
         });
       }
 
 
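The `parseCookies` helper above splits the header on `;`, then on the first `=` only, re-joining the remainder so values that themselves contain `=` (base64, JSON) survive intact. A standalone sketch with a sample header (values hypothetical):

```ts
// Standalone version of the parsing logic above, with a sample Cookie header.
function parseCookies(cookieHeader: string): Record<string, string> {
  const cookies: Record<string, string> = {};
  const items = cookieHeader.split(';').map((cookie) => cookie.trim());

  items.forEach((item) => {
    const [name, ...rest] = item.split('=');

    if (name && rest) {
      // rest.join('=') restores values that legitimately contain '='.
      cookies[decodeURIComponent(name.trim())] = decodeURIComponent(rest.join('=').trim());
    }
  });

  return cookies;
}

// parseCookies('apiKeys=%7B%22OpenAI%22%3A%22sk-test%22%7D; theme=dark')
// -> { apiKeys: '{"OpenAI":"sk-test"}', theme: 'dark' }
```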
app/types/model.ts CHANGED
@@ -1,10 +1,10 @@
 import type { ModelInfo } from '~/utils/types';
 
 export type ProviderInfo = {
-  staticModels: ModelInfo[],
-  name: string,
-  getDynamicModels?: () => Promise<ModelInfo[]>,
-  getApiKeyLink?: string,
-  labelForGetApiKey?: string,
-  icon?:string,
+  staticModels: ModelInfo[];
+  name: string;
+  getDynamicModels?: () => Promise<ModelInfo[]>;
+  getApiKeyLink?: string;
+  labelForGetApiKey?: string;
+  icon?: string;
 };
app/utils/constants.ts CHANGED
@@ -12,26 +12,42 @@ const PROVIDER_LIST: ProviderInfo[] = [
   {
     name: 'Anthropic',
     staticModels: [
-      { name: 'claude-3-5-sonnet-latest', label: 'Claude 3.5 Sonnet (new)', provider: 'Anthropic', maxTokenAllowed: 8000 },
-      { name: 'claude-3-5-sonnet-20240620', label: 'Claude 3.5 Sonnet (old)', provider: 'Anthropic', maxTokenAllowed: 8000 },
-      { name: 'claude-3-5-haiku-latest', label: 'Claude 3.5 Haiku (new)', provider: 'Anthropic', maxTokenAllowed: 8000 },
+      {
+        name: 'claude-3-5-sonnet-latest',
+        label: 'Claude 3.5 Sonnet (new)',
+        provider: 'Anthropic',
+        maxTokenAllowed: 8000,
+      },
+      {
+        name: 'claude-3-5-sonnet-20240620',
+        label: 'Claude 3.5 Sonnet (old)',
+        provider: 'Anthropic',
+        maxTokenAllowed: 8000,
+      },
+      {
+        name: 'claude-3-5-haiku-latest',
+        label: 'Claude 3.5 Haiku (new)',
+        provider: 'Anthropic',
+        maxTokenAllowed: 8000,
+      },
       { name: 'claude-3-opus-latest', label: 'Claude 3 Opus', provider: 'Anthropic', maxTokenAllowed: 8000 },
       { name: 'claude-3-sonnet-20240229', label: 'Claude 3 Sonnet', provider: 'Anthropic', maxTokenAllowed: 8000 },
-      { name: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku', provider: 'Anthropic', maxTokenAllowed: 8000 }
+      { name: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku', provider: 'Anthropic', maxTokenAllowed: 8000 },
     ],
-    getApiKeyLink: "https://console.anthropic.com/settings/keys",
+    getApiKeyLink: 'https://console.anthropic.com/settings/keys',
   },
   {
     name: 'Ollama',
     staticModels: [],
     getDynamicModels: getOllamaModels,
-    getApiKeyLink: "https://ollama.com/download",
-    labelForGetApiKey: "Download Ollama",
-    icon: "i-ph:cloud-arrow-down",
-  }, {
+    getApiKeyLink: 'https://ollama.com/download',
+    labelForGetApiKey: 'Download Ollama',
+    icon: 'i-ph:cloud-arrow-down',
+  },
+  {
     name: 'OpenAILike',
     staticModels: [],
-    getDynamicModels: getOpenAILikeModels
+    getDynamicModels: getOpenAILikeModels,
   },
   {
     name: 'Cohere',
@@ -47,7 +63,7 @@ const PROVIDER_LIST: ProviderInfo[] = [
       { name: 'c4ai-aya-expanse-8b', label: 'c4AI Aya Expanse 8b', provider: 'Cohere', maxTokenAllowed: 4096 },
       { name: 'c4ai-aya-expanse-32b', label: 'c4AI Aya Expanse 32b', provider: 'Cohere', maxTokenAllowed: 4096 },
     ],
-    getApiKeyLink: 'https://dashboard.cohere.com/api-keys'
+    getApiKeyLink: 'https://dashboard.cohere.com/api-keys',
   },
   {
     name: 'OpenRouter',
@@ -56,22 +72,52 @@ const PROVIDER_LIST: ProviderInfo[] = [
       {
         name: 'anthropic/claude-3.5-sonnet',
         label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',
-        provider: 'OpenRouter'
-        , maxTokenAllowed: 8000
+        provider: 'OpenRouter',
+        maxTokenAllowed: 8000,
       },
-      { name: 'anthropic/claude-3-haiku', label: 'Anthropic: Claude 3 Haiku (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
-      { name: 'deepseek/deepseek-coder', label: 'Deepseek-Coder V2 236B (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
-      { name: 'google/gemini-flash-1.5', label: 'Google Gemini Flash 1.5 (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
-      { name: 'google/gemini-pro-1.5', label: 'Google Gemini Pro 1.5 (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
+      {
+        name: 'anthropic/claude-3-haiku',
+        label: 'Anthropic: Claude 3 Haiku (OpenRouter)',
+        provider: 'OpenRouter',
+        maxTokenAllowed: 8000,
+      },
+      {
+        name: 'deepseek/deepseek-coder',
+        label: 'Deepseek-Coder V2 236B (OpenRouter)',
+        provider: 'OpenRouter',
+        maxTokenAllowed: 8000,
+      },
+      {
+        name: 'google/gemini-flash-1.5',
+        label: 'Google Gemini Flash 1.5 (OpenRouter)',
+        provider: 'OpenRouter',
+        maxTokenAllowed: 8000,
+      },
+      {
+        name: 'google/gemini-pro-1.5',
+        label: 'Google Gemini Pro 1.5 (OpenRouter)',
+        provider: 'OpenRouter',
+        maxTokenAllowed: 8000,
+      },
       { name: 'x-ai/grok-beta', label: 'xAI Grok Beta (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
-      { name: 'mistralai/mistral-nemo', label: 'OpenRouter Mistral Nemo (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
-      { name: 'qwen/qwen-110b-chat', label: 'OpenRouter Qwen 110b Chat (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
-      { name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 4096 }
+      {
+        name: 'mistralai/mistral-nemo',
+        label: 'OpenRouter Mistral Nemo (OpenRouter)',
+        provider: 'OpenRouter',
+        maxTokenAllowed: 8000,
+      },
+      {
+        name: 'qwen/qwen-110b-chat',
+        label: 'OpenRouter Qwen 110b Chat (OpenRouter)',
+        provider: 'OpenRouter',
+        maxTokenAllowed: 8000,
+      },
+      { name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 4096 },
     ],
     getDynamicModels: getOpenRouterModels,
     getApiKeyLink: 'https://openrouter.ai/settings/keys',
-
-  }, {
+  },
+  {
     name: 'Google',
     staticModels: [
       { name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google', maxTokenAllowed: 8192 },
@@ -79,32 +125,92 @@ const PROVIDER_LIST: ProviderInfo[] = [
79
  { name: 'gemini-1.5-flash-8b', label: 'Gemini 1.5 Flash-8b', provider: 'Google', maxTokenAllowed: 8192 },
80
  { name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google', maxTokenAllowed: 8192 },
81
  { name: 'gemini-1.5-pro-002', label: 'Gemini 1.5 Pro-002', provider: 'Google', maxTokenAllowed: 8192 },
82
- { name: 'gemini-exp-1121', label: 'Gemini exp-1121', provider: 'Google', maxTokenAllowed: 8192 }
83
  ],
84
- getApiKeyLink: 'https://aistudio.google.com/app/apikey'
85
- }, {
 
86
  name: 'Groq',
87
  staticModels: [
88
  { name: 'llama-3.1-70b-versatile', label: 'Llama 3.1 70b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
89
  { name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
90
  { name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
91
  { name: 'llama-3.2-3b-preview', label: 'Llama 3.2 3b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
92
- { name: 'llama-3.2-1b-preview', label: 'Llama 3.2 1b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 }
93
  ],
94
- getApiKeyLink: 'https://console.groq.com/keys'
95
  },
96
  {
97
  name: 'HuggingFace',
98
  staticModels: [
99
- { name: 'Qwen/Qwen2.5-Coder-32B-Instruct', label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
100
- { name: 'Qwen/Qwen2.5-72B-Instruct', label: 'Qwen2.5-72B-Instruct (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
101
- { name: 'meta-llama/Llama-3.1-70B-Instruct', label: 'Llama-3.1-70B-Instruct (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
102
- { name: 'meta-llama/Llama-3.1-405B', label: 'Llama-3.1-405B (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
103
- { name: '01-ai/Yi-1.5-34B-Chat', label: 'Yi-1.5-34B-Chat (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
104
- { name: 'codellama/CodeLlama-34b-Instruct-hf', label: 'CodeLlama-34b-Instruct (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
105
- { name: 'NousResearch/Hermes-3-Llama-3.1-8B', label: 'Hermes-3-Llama-3.1-8B (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
106
  ],
107
- getApiKeyLink: 'https://huggingface.co/settings/tokens'
108
  },
109
 
110
  {
@@ -113,23 +219,24 @@ const PROVIDER_LIST: ProviderInfo[] = [
113
  { name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
114
  { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
115
  { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },
116
- { name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 }
117
  ],
118
- getApiKeyLink: "https://platform.openai.com/api-keys",
119
- }, {
 
120
  name: 'xAI',
121
- staticModels: [
122
- { name: 'grok-beta', label: 'xAI Grok Beta', provider: 'xAI', maxTokenAllowed: 8000 }
123
- ],
124
- getApiKeyLink: 'https://docs.x.ai/docs/quickstart#creating-an-api-key'
125
- }, {
126
  name: 'Deepseek',
127
  staticModels: [
128
  { name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek', maxTokenAllowed: 8000 },
129
- { name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek', maxTokenAllowed: 8000 }
130
  ],
131
- getApiKeyLink: 'https://platform.deepseek.com/api_keys'
132
- }, {
 
133
  name: 'Mistral',
134
  staticModels: [
135
  { name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral', maxTokenAllowed: 8000 },
@@ -140,27 +247,29 @@ const PROVIDER_LIST: ProviderInfo[] = [
140
  { name: 'ministral-8b-latest', label: 'Mistral 8B', provider: 'Mistral', maxTokenAllowed: 8000 },
141
  { name: 'mistral-small-latest', label: 'Mistral Small', provider: 'Mistral', maxTokenAllowed: 8000 },
142
  { name: 'codestral-latest', label: 'Codestral', provider: 'Mistral', maxTokenAllowed: 8000 },
143
- { name: 'mistral-large-latest', label: 'Mistral Large Latest', provider: 'Mistral', maxTokenAllowed: 8000 }
144
  ],
145
- getApiKeyLink: 'https://console.mistral.ai/api-keys/'
146
- }, {
 
147
  name: 'LMStudio',
148
  staticModels: [],
149
  getDynamicModels: getLMStudioModels,
150
  getApiKeyLink: 'https://lmstudio.ai/',
151
  labelForGetApiKey: 'Get LMStudio',
152
- icon: "i-ph:cloud-arrow-down",
153
- }
154
  ];
155
 
156
  export const DEFAULT_PROVIDER = PROVIDER_LIST[0];
157
 
158
- const staticModels: ModelInfo[] = PROVIDER_LIST.map(p => p.staticModels).flat();
159
 
160
  export let MODEL_LIST: ModelInfo[] = [...staticModels];
161
 
162
  const getOllamaBaseUrl = () => {
163
  const defaultBaseUrl = import.meta.env.OLLAMA_API_BASE_URL || 'http://localhost:11434';
 
164
  // Check if we're in the browser
165
  if (typeof window !== 'undefined') {
166
  // Frontend always uses localhost
@@ -170,23 +279,22 @@ const getOllamaBaseUrl = () => {
170
  // Backend: Check if we're running in Docker
171
  const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
172
 
173
- return isDocker
174
- ? defaultBaseUrl.replace('localhost', 'host.docker.internal')
175
- : defaultBaseUrl;
176
  };
177
 
178
  async function getOllamaModels(): Promise<ModelInfo[]> {
179
  try {
180
- const base_url = getOllamaBaseUrl();
181
- const response = await fetch(`${base_url}/api/tags`);
182
- const data = await response.json() as OllamaApiResponse;
183
 
184
  return data.models.map((model: OllamaModel) => ({
185
  name: model.name,
186
  label: `${model.name} (${model.details.parameter_size})`,
187
  provider: 'Ollama',
188
- maxTokenAllowed:8000,
189
  }));
 
190
  } catch (e) {
191
  return [];
192
  }
@@ -194,22 +302,26 @@ async function getOllamaModels(): Promise<ModelInfo[]> {
194
 
195
  async function getOpenAILikeModels(): Promise<ModelInfo[]> {
196
  try {
197
- const base_url = import.meta.env.OPENAI_LIKE_API_BASE_URL || '';
198
- if (!base_url) {
 
199
  return [];
200
  }
201
- const api_key = import.meta.env.OPENAI_LIKE_API_KEY ?? '';
202
- const response = await fetch(`${base_url}/models`, {
 
203
  headers: {
204
- Authorization: `Bearer ${api_key}`
205
- }
206
  });
207
- const res = await response.json() as any;
 
208
  return res.data.map((model: any) => ({
209
  name: model.id,
210
  label: model.id,
211
- provider: 'OpenAILike'
212
  }));
 
213
  } catch (e) {
214
  return [];
215
  }
@@ -223,51 +335,67 @@ type OpenRouterModelsResponse = {
223
  pricing: {
224
  prompt: number;
225
  completion: number;
226
- }
227
- }[]
228
  };
229
 
230
  async function getOpenRouterModels(): Promise<ModelInfo[]> {
231
- const data: OpenRouterModelsResponse = await (await fetch('https://openrouter.ai/api/v1/models', {
232
- headers: {
233
- 'Content-Type': 'application/json'
234
- }
235
- })).json();
 
 
236
 
237
- return data.data.sort((a, b) => a.name.localeCompare(b.name)).map(m => ({
238
- name: m.id,
239
- label: `${m.name} - in:$${(m.pricing.prompt * 1_000_000).toFixed(
240
- 2)} out:$${(m.pricing.completion * 1_000_000).toFixed(2)} - context ${Math.floor(
241
- m.context_length / 1000)}k`,
242
- provider: 'OpenRouter',
243
- maxTokenAllowed:8000,
244
- }));
 
 
245
  }
246
 
247
  async function getLMStudioModels(): Promise<ModelInfo[]> {
248
  try {
249
- const base_url = import.meta.env.LMSTUDIO_API_BASE_URL || 'http://localhost:1234';
250
- const response = await fetch(`${base_url}/v1/models`);
251
- const data = await response.json() as any;
 
252
  return data.data.map((model: any) => ({
253
  name: model.id,
254
  label: model.id,
255
- provider: 'LMStudio'
256
  }));
 
257
  } catch (e) {
258
  return [];
259
  }
260
  }
261
 
262
-
263
-
264
  async function initializeModelList(): Promise<ModelInfo[]> {
265
- MODEL_LIST = [...(await Promise.all(
266
- PROVIDER_LIST
267
- .filter((p): p is ProviderInfo & { getDynamicModels: () => Promise<ModelInfo[]> } => !!p.getDynamicModels)
268
- .map(p => p.getDynamicModels())))
269
- .flat(), ...staticModels];
 
 
 
 
 
270
  return MODEL_LIST;
271
  }
272
 
273
- export { getOllamaModels, getOpenAILikeModels, getLMStudioModels, initializeModelList, getOpenRouterModels, PROVIDER_LIST };
 
 
 
 
 
 
 
 
12
  {
13
  name: 'Anthropic',
14
  staticModels: [
15
+ {
16
+ name: 'claude-3-5-sonnet-latest',
17
+ label: 'Claude 3.5 Sonnet (new)',
18
+ provider: 'Anthropic',
19
+ maxTokenAllowed: 8000,
20
+ },
21
+ {
22
+ name: 'claude-3-5-sonnet-20240620',
23
+ label: 'Claude 3.5 Sonnet (old)',
24
+ provider: 'Anthropic',
25
+ maxTokenAllowed: 8000,
26
+ },
27
+ {
28
+ name: 'claude-3-5-haiku-latest',
29
+ label: 'Claude 3.5 Haiku (new)',
30
+ provider: 'Anthropic',
31
+ maxTokenAllowed: 8000,
32
+ },
33
  { name: 'claude-3-opus-latest', label: 'Claude 3 Opus', provider: 'Anthropic', maxTokenAllowed: 8000 },
34
  { name: 'claude-3-sonnet-20240229', label: 'Claude 3 Sonnet', provider: 'Anthropic', maxTokenAllowed: 8000 },
35
+ { name: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku', provider: 'Anthropic', maxTokenAllowed: 8000 },
36
  ],
37
+ getApiKeyLink: 'https://console.anthropic.com/settings/keys',
38
  },
39
  {
40
  name: 'Ollama',
41
  staticModels: [],
42
  getDynamicModels: getOllamaModels,
43
+ getApiKeyLink: 'https://ollama.com/download',
44
+ labelForGetApiKey: 'Download Ollama',
45
+ icon: 'i-ph:cloud-arrow-down',
46
+ },
47
+ {
48
  name: 'OpenAILike',
49
  staticModels: [],
50
+ getDynamicModels: getOpenAILikeModels,
51
  },
52
  {
53
  name: 'Cohere',
 
63
  { name: 'c4ai-aya-expanse-8b', label: 'c4AI Aya Expanse 8b', provider: 'Cohere', maxTokenAllowed: 4096 },
64
  { name: 'c4ai-aya-expanse-32b', label: 'c4AI Aya Expanse 32b', provider: 'Cohere', maxTokenAllowed: 4096 },
65
  ],
66
+ getApiKeyLink: 'https://dashboard.cohere.com/api-keys',
67
  },
68
  {
69
  name: 'OpenRouter',
 
72
  {
73
  name: 'anthropic/claude-3.5-sonnet',
74
  label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',
75
+ provider: 'OpenRouter',
76
+ maxTokenAllowed: 8000,
77
+ },
78
+ {
79
+ name: 'anthropic/claude-3-haiku',
80
+ label: 'Anthropic: Claude 3 Haiku (OpenRouter)',
81
+ provider: 'OpenRouter',
82
+ maxTokenAllowed: 8000,
83
+ },
84
+ {
85
+ name: 'deepseek/deepseek-coder',
86
+ label: 'Deepseek-Coder V2 236B (OpenRouter)',
87
+ provider: 'OpenRouter',
88
+ maxTokenAllowed: 8000,
89
+ },
90
+ {
91
+ name: 'google/gemini-flash-1.5',
92
+ label: 'Google Gemini Flash 1.5 (OpenRouter)',
93
+ provider: 'OpenRouter',
94
+ maxTokenAllowed: 8000,
95
+ },
96
+ {
97
+ name: 'google/gemini-pro-1.5',
98
+ label: 'Google Gemini Pro 1.5 (OpenRouter)',
99
+ provider: 'OpenRouter',
100
+ maxTokenAllowed: 8000,
101
  },
 
 
 
 
102
  { name: 'x-ai/grok-beta', label: 'xAI Grok Beta (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
103
+ {
104
+ name: 'mistralai/mistral-nemo',
105
+ label: 'OpenRouter Mistral Nemo (OpenRouter)',
106
+ provider: 'OpenRouter',
107
+ maxTokenAllowed: 8000,
108
+ },
109
+ {
110
+ name: 'qwen/qwen-110b-chat',
111
+ label: 'OpenRouter Qwen 110b Chat (OpenRouter)',
112
+ provider: 'OpenRouter',
113
+ maxTokenAllowed: 8000,
114
+ },
115
+ { name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 4096 },
116
  ],
117
  getDynamicModels: getOpenRouterModels,
118
  getApiKeyLink: 'https://openrouter.ai/settings/keys',
119
+ },
120
+ {
121
  name: 'Google',
122
  staticModels: [
123
  { name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google', maxTokenAllowed: 8192 },
 
125
  { name: 'gemini-1.5-flash-8b', label: 'Gemini 1.5 Flash-8b', provider: 'Google', maxTokenAllowed: 8192 },
126
  { name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google', maxTokenAllowed: 8192 },
127
  { name: 'gemini-1.5-pro-002', label: 'Gemini 1.5 Pro-002', provider: 'Google', maxTokenAllowed: 8192 },
128
+ { name: 'gemini-exp-1121', label: 'Gemini exp-1121', provider: 'Google', maxTokenAllowed: 8192 },
129
  ],
130
+ getApiKeyLink: 'https://aistudio.google.com/app/apikey',
131
+ },
132
+ {
133
  name: 'Groq',
134
  staticModels: [
135
  { name: 'llama-3.1-70b-versatile', label: 'Llama 3.1 70b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
136
  { name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
137
  { name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
138
  { name: 'llama-3.2-3b-preview', label: 'Llama 3.2 3b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
139
+ { name: 'llama-3.2-1b-preview', label: 'Llama 3.2 1b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
140
  ],
141
+ getApiKeyLink: 'https://console.groq.com/keys',
142
  },
143
  {
144
  name: 'HuggingFace',
145
  staticModels: [
146
+ {
147
+ name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
148
+ label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
149
+ provider: 'HuggingFace',
150
+ maxTokenAllowed: 8000,
151
+ },
152
+ {
153
+ name: '01-ai/Yi-1.5-34B-Chat',
154
+ label: 'Yi-1.5-34B-Chat (HuggingFace)',
155
+ provider: 'HuggingFace',
156
+ maxTokenAllowed: 8000,
157
+ },
158
+ {
159
+ name: 'codellama/CodeLlama-34b-Instruct-hf',
160
+ label: 'CodeLlama-34b-Instruct (HuggingFace)',
161
+ provider: 'HuggingFace',
162
+ maxTokenAllowed: 8000,
163
+ },
164
+ {
165
+ name: 'NousResearch/Hermes-3-Llama-3.1-8B',
166
+ label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
167
+ provider: 'HuggingFace',
168
+ maxTokenAllowed: 8000,
169
+ },
170
+ {
171
+ name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
172
+ label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
173
+ provider: 'HuggingFace',
174
+ maxTokenAllowed: 8000,
175
+ },
176
+ {
177
+ name: 'Qwen/Qwen2.5-72B-Instruct',
178
+ label: 'Qwen2.5-72B-Instruct (HuggingFace)',
179
+ provider: 'HuggingFace',
180
+ maxTokenAllowed: 8000,
181
+ },
182
+ {
183
+ name: 'meta-llama/Llama-3.1-70B-Instruct',
184
+ label: 'Llama-3.1-70B-Instruct (HuggingFace)',
185
+ provider: 'HuggingFace',
186
+ maxTokenAllowed: 8000,
187
+ },
188
+ {
189
+ name: 'meta-llama/Llama-3.1-405B',
190
+ label: 'Llama-3.1-405B (HuggingFace)',
191
+ provider: 'HuggingFace',
192
+ maxTokenAllowed: 8000,
193
+ },
194
+ {
195
+ name: '01-ai/Yi-1.5-34B-Chat',
196
+ label: 'Yi-1.5-34B-Chat (HuggingFace)',
197
+ provider: 'HuggingFace',
198
+ maxTokenAllowed: 8000,
199
+ },
200
+ {
201
+ name: 'codellama/CodeLlama-34b-Instruct-hf',
202
+ label: 'CodeLlama-34b-Instruct (HuggingFace)',
203
+ provider: 'HuggingFace',
204
+ maxTokenAllowed: 8000,
205
+ },
206
+ {
207
+ name: 'NousResearch/Hermes-3-Llama-3.1-8B',
208
+ label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
209
+ provider: 'HuggingFace',
210
+ maxTokenAllowed: 8000,
211
+ },
212
  ],
213
+ getApiKeyLink: 'https://huggingface.co/settings/tokens',
214
  },
215
 
216
  {
 
219
  { name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
220
  { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
221
  { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },
222
+ { name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
223
  ],
224
+ getApiKeyLink: 'https://platform.openai.com/api-keys',
225
+ },
226
+ {
227
  name: 'xAI',
228
+ staticModels: [{ name: 'grok-beta', label: 'xAI Grok Beta', provider: 'xAI', maxTokenAllowed: 8000 }],
229
+ getApiKeyLink: 'https://docs.x.ai/docs/quickstart#creating-an-api-key',
230
+ },
231
+ {
 
232
  name: 'Deepseek',
233
  staticModels: [
234
  { name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek', maxTokenAllowed: 8000 },
235
+ { name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek', maxTokenAllowed: 8000 },
236
  ],
237
+ getApiKeyLink: 'https://platform.deepseek.com/apiKeys',
238
+ },
239
+ {
240
  name: 'Mistral',
241
  staticModels: [
242
  { name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral', maxTokenAllowed: 8000 },
 
247
  { name: 'ministral-8b-latest', label: 'Mistral 8B', provider: 'Mistral', maxTokenAllowed: 8000 },
248
  { name: 'mistral-small-latest', label: 'Mistral Small', provider: 'Mistral', maxTokenAllowed: 8000 },
249
  { name: 'codestral-latest', label: 'Codestral', provider: 'Mistral', maxTokenAllowed: 8000 },
250
+ { name: 'mistral-large-latest', label: 'Mistral Large Latest', provider: 'Mistral', maxTokenAllowed: 8000 },
251
  ],
252
+ getApiKeyLink: 'https://console.mistral.ai/api-keys/',
253
+ },
254
+ {
255
  name: 'LMStudio',
256
  staticModels: [],
257
  getDynamicModels: getLMStudioModels,
258
  getApiKeyLink: 'https://lmstudio.ai/',
259
  labelForGetApiKey: 'Get LMStudio',
260
+ icon: 'i-ph:cloud-arrow-down',
261
+ },
262
  ];
263
 
264
  export const DEFAULT_PROVIDER = PROVIDER_LIST[0];
265
 
266
+ const staticModels: ModelInfo[] = PROVIDER_LIST.map((p) => p.staticModels).flat();
267
 
268
  export let MODEL_LIST: ModelInfo[] = [...staticModels];
269
 
270
  const getOllamaBaseUrl = () => {
271
  const defaultBaseUrl = import.meta.env.OLLAMA_API_BASE_URL || 'http://localhost:11434';
272
+
273
  // Check if we're in the browser
274
  if (typeof window !== 'undefined') {
275
  // Frontend always uses localhost
 
279
  // Backend: Check if we're running in Docker
280
  const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
281
 
282
+ return isDocker ? defaultBaseUrl.replace('localhost', 'host.docker.internal') : defaultBaseUrl;
 
 
283
  };
284
 
285
  async function getOllamaModels(): Promise<ModelInfo[]> {
286
  try {
287
+ const baseUrl = getOllamaBaseUrl();
288
+ const response = await fetch(`${baseUrl}/api/tags`);
289
+ const data = (await response.json()) as OllamaApiResponse;
290
 
291
  return data.models.map((model: OllamaModel) => ({
292
  name: model.name,
293
  label: `${model.name} (${model.details.parameter_size})`,
294
  provider: 'Ollama',
295
+ maxTokenAllowed: 8000,
296
  }));
297
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
298
  } catch (e) {
299
  return [];
300
  }
 
302
 
303
  async function getOpenAILikeModels(): Promise<ModelInfo[]> {
304
  try {
305
+ const baseUrl = import.meta.env.OPENAI_LIKE_API_BASE_URL || '';
306
+
307
+ if (!baseUrl) {
308
  return [];
309
  }
310
+
311
+ const apiKey = import.meta.env.OPENAI_LIKE_API_KEY ?? '';
312
+ const response = await fetch(`${baseUrl}/models`, {
313
  headers: {
314
+ Authorization: `Bearer ${apiKey}`,
315
+ },
316
  });
317
+ const res = (await response.json()) as any;
318
+
319
  return res.data.map((model: any) => ({
320
  name: model.id,
321
  label: model.id,
322
+ provider: 'OpenAILike',
323
  }));
324
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
325
  } catch (e) {
326
  return [];
327
  }
 
335
  pricing: {
336
  prompt: number;
337
  completion: number;
338
+ };
339
+ }[];
340
  };
341
 
342
  async function getOpenRouterModels(): Promise<ModelInfo[]> {
343
+ const data: OpenRouterModelsResponse = await (
344
+ await fetch('https://openrouter.ai/api/v1/models', {
345
+ headers: {
346
+ 'Content-Type': 'application/json',
347
+ },
348
+ })
349
+ ).json();
350
 
351
+ return data.data
352
+ .sort((a, b) => a.name.localeCompare(b.name))
353
+ .map((m) => ({
354
+ name: m.id,
355
+ label: `${m.name} - in:$${(m.pricing.prompt * 1_000_000).toFixed(
356
+ 2,
357
+ )} out:$${(m.pricing.completion * 1_000_000).toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
358
+ provider: 'OpenRouter',
359
+ maxTokenAllowed: 8000,
360
+ }));
361
  }
362
 
363
  async function getLMStudioModels(): Promise<ModelInfo[]> {
364
  try {
365
+ const baseUrl = import.meta.env.LMSTUDIO_API_BASE_URL || 'http://localhost:1234';
366
+ const response = await fetch(`${baseUrl}/v1/models`);
367
+ const data = (await response.json()) as any;
368
+
369
  return data.data.map((model: any) => ({
370
  name: model.id,
371
  label: model.id,
372
+ provider: 'LMStudio',
373
  }));
374
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
375
  } catch (e) {
376
  return [];
377
  }
378
  }
379
 
 
 
380
  async function initializeModelList(): Promise<ModelInfo[]> {
381
+ MODEL_LIST = [
382
+ ...(
383
+ await Promise.all(
384
+ PROVIDER_LIST.filter(
385
+ (p): p is ProviderInfo & { getDynamicModels: () => Promise<ModelInfo[]> } => !!p.getDynamicModels,
386
+ ).map((p) => p.getDynamicModels()),
387
+ )
388
+ ).flat(),
389
+ ...staticModels,
390
+ ];
391
  return MODEL_LIST;
392
  }
393
 
394
+ export {
395
+ getOllamaModels,
396
+ getOpenAILikeModels,
397
+ getLMStudioModels,
398
+ initializeModelList,
399
+ getOpenRouterModels,
400
+ PROVIDER_LIST,
401
+ };
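A usage sketch for the reworked initialization (hypothetical call site; the imports are the module's actual exports):

import { initializeModelList, MODEL_LIST } from '~/utils/constants';

async function logAvailableModels() {
  // Providers exposing getDynamicModels (Ollama, OpenAILike, OpenRouter, LMStudio)
  // are queried first; their models are merged ahead of the static ones.
  const models = await initializeModelList();
  console.log(`loaded ${models.length} models; MODEL_LIST now holds ${MODEL_LIST.length}`);
}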
app/utils/shell.ts CHANGED
@@ -52,67 +52,77 @@ export async function newShellProcess(webcontainer: WebContainer, terminal: ITer
   return process;
 }
 
-
+export type ExecutionResult = { output: string; exitCode: number } | undefined;
 
 export class BoltShell {
-  #initialized: (() => void) | undefined
-  #readyPromise: Promise<void>
-  #webcontainer: WebContainer | undefined
-  #terminal: ITerminal | undefined
-  #process: WebContainerProcess | undefined
-  executionState = atom<{ sessionId: string, active: boolean, executionPrms?: Promise<any> } | undefined>()
-  #outputStream: ReadableStreamDefaultReader<string> | undefined
-  #shellInputStream: WritableStreamDefaultWriter<string> | undefined
+  #initialized: (() => void) | undefined;
+  #readyPromise: Promise<void>;
+  #webcontainer: WebContainer | undefined;
+  #terminal: ITerminal | undefined;
+  #process: WebContainerProcess | undefined;
+  executionState = atom<{ sessionId: string; active: boolean; executionPrms?: Promise<any> } | undefined>();
+  #outputStream: ReadableStreamDefaultReader<string> | undefined;
+  #shellInputStream: WritableStreamDefaultWriter<string> | undefined;
+
   constructor() {
     this.#readyPromise = new Promise((resolve) => {
-      this.#initialized = resolve
-    })
+      this.#initialized = resolve;
+    });
   }
+
   ready() {
     return this.#readyPromise;
   }
+
   async init(webcontainer: WebContainer, terminal: ITerminal) {
-    this.#webcontainer = webcontainer
-    this.#terminal = terminal
-    let callback = (data: string) => {
-      console.log(data)
-    }
-    let { process, output } = await this.newBoltShellProcess(webcontainer, terminal)
-    this.#process = process
-    this.#outputStream = output.getReader()
-    await this.waitTillOscCode('interactive')
-    this.#initialized?.()
+    this.#webcontainer = webcontainer;
+    this.#terminal = terminal;
+
+    const { process, output } = await this.newBoltShellProcess(webcontainer, terminal);
+    this.#process = process;
+    this.#outputStream = output.getReader();
+    await this.waitTillOscCode('interactive');
+    this.#initialized?.();
   }
+
   get terminal() {
-    return this.#terminal
+    return this.#terminal;
   }
+
   get process() {
-    return this.#process
+    return this.#process;
   }
-  async executeCommand(sessionId: string, command: string) {
+
+  async executeCommand(sessionId: string, command: string): Promise<ExecutionResult> {
     if (!this.process || !this.terminal) {
-      return
+      return undefined;
     }
-    let state = this.executionState.get()
 
-    //interrupt the current execution
-    // this.#shellInputStream?.write('\x03');
+    const state = this.executionState.get();
+
+    /*
+     * interrupt the current execution
+     * this.#shellInputStream?.write('\x03');
+     */
    this.terminal.input('\x03');
+
     if (state && state.executionPrms) {
-      await state.executionPrms
+      await state.executionPrms;
     }
+
     //start a new execution
     this.terminal.input(command.trim() + '\n');
 
     //wait for the execution to finish
-    let executionPrms = this.getCurrentExecutionResult()
-    this.executionState.set({ sessionId, active: true, executionPrms })
+    const executionPromise = this.getCurrentExecutionResult();
+    this.executionState.set({ sessionId, active: true, executionPrms: executionPromise });
 
-    let resp = await executionPrms
-    this.executionState.set({ sessionId, active: false })
-    return resp
+    const resp = await executionPromise;
+    this.executionState.set({ sessionId, active: false });
 
+    return resp;
   }
+
   async newBoltShellProcess(webcontainer: WebContainer, terminal: ITerminal) {
     const args: string[] = [];
 
@@ -126,6 +136,7 @@ export class BoltShell {
 
     const input = process.input.getWriter();
     this.#shellInputStream = input;
+
     const [internalOutput, terminalOutput] = process.output.tee();
 
     const jshReady = withResolvers<void>();
@@ -162,34 +173,48 @@ export class BoltShell {
 
     return { process, output: internalOutput };
   }
-  async getCurrentExecutionResult() {
-    let { output, exitCode } = await this.waitTillOscCode('exit')
+
+  async getCurrentExecutionResult(): Promise<ExecutionResult> {
+    const { output, exitCode } = await this.waitTillOscCode('exit');
     return { output, exitCode };
   }
+
   async waitTillOscCode(waitCode: string) {
     let fullOutput = '';
     let exitCode: number = 0;
-    if (!this.#outputStream) return { output: fullOutput, exitCode };
-    let tappedStream = this.#outputStream
+
+    if (!this.#outputStream) {
+      return { output: fullOutput, exitCode };
+    }
+
+    const tappedStream = this.#outputStream;
 
     while (true) {
       const { value, done } = await tappedStream.read();
-      if (done) break;
+
+      if (done) {
+        break;
+      }
+
       const text = value || '';
       fullOutput += text;
 
       // Check if command completion signal with exit code
-      const [, osc, , pid, code] = text.match(/\x1b\]654;([^\x07=]+)=?((-?\d+):(\d+))?\x07/) || [];
+      const [, osc, , , code] = text.match(/\x1b\]654;([^\x07=]+)=?((-?\d+):(\d+))?\x07/) || [];
+
      if (osc === 'exit') {
         exitCode = parseInt(code, 10);
       }
+
       if (osc === waitCode) {
         break;
       }
     }
+
     return { output: fullOutput, exitCode };
   }
 }
+
 export function newBoltShellProcess() {
   return new BoltShell();
 }
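To see what the tightened destructuring in waitTillOscCode extracts, a standalone check against a made-up completion signal (hypothetical values; the regex is the one used above):

// A completion signal has the shape ESC ] 654 ; <osc>=<pid>:<code> BEL.
const signal = '\x1b]654;exit=123:0\x07';
const [, osc, , , code] = signal.match(/\x1b\]654;([^\x07=]+)=?((-?\d+):(\d+))?\x07/) || [];

console.log(osc); // 'exit'
console.log(parseInt(code, 10)); // 0 — the pid (123) is skipped by the destructuring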
app/utils/types.ts CHANGED
@@ -1,4 +1,3 @@
-
 interface OllamaModelDetails {
   parent_model: string;
   format: string;
@@ -29,10 +28,10 @@ export interface ModelInfo {
 }
 
 export interface ProviderInfo {
-  staticModels: ModelInfo[],
-  name: string,
-  getDynamicModels?: () => Promise<ModelInfo[]>,
-  getApiKeyLink?: string,
-  labelForGetApiKey?: string,
-  icon?:string,
-};
+  staticModels: ModelInfo[];
+  name: string;
+  getDynamicModels?: () => Promise<ModelInfo[]>;
+  getApiKeyLink?: string;
+  labelForGetApiKey?: string;
+  icon?: string;
+}
eslint.config.mjs CHANGED
@@ -12,6 +12,8 @@ export default [
       '@blitz/catch-error-name': 'off',
       '@typescript-eslint/no-this-alias': 'off',
       '@typescript-eslint/no-empty-object-type': 'off',
+      '@blitz/comment-syntax': 'off',
+      '@blitz/block-scope-case': 'off',
     },
   },
   {
package.json CHANGED
@@ -11,8 +11,8 @@
     "dev": "remix vite:dev",
     "test": "vitest --run",
     "test:watch": "vitest",
-    "lint": "eslint --cache --cache-location ./node_modules/.cache/eslint .",
-    "lint:fix": "npm run lint -- --fix",
+    "lint": "eslint --cache --cache-location ./node_modules/.cache/eslint app",
+    "lint:fix": "npm run lint -- --fix && prettier app --write",
     "start": "bindings=$(./bindings.sh) && wrangler pages dev ./build/client $bindings",
     "dockerstart": "bindings=$(./bindings.sh) && wrangler pages dev ./build/client $bindings --ip 0.0.0.0 --port 5173 --no-show-interactive-dev-session",
     "dockerrun": "docker run -it -d --name bolt-ai-live -p 5173:5173 --env-file .env.local bolt-ai",
worker-configuration.d.ts CHANGED
@@ -9,4 +9,7 @@ interface Env {
   OPENAI_LIKE_API_BASE_URL: string;
   DEEPSEEK_API_KEY: string;
   LMSTUDIO_API_BASE_URL: string;
+  GOOGLE_GENERATIVE_AI_API_KEY: string;
+  MISTRAL_API_KEY: string;
+  XAI_API_KEY: string;
 }
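These bindings are consumed server-side; a rough sketch of resolving a provider's key from Env (hypothetical helper and name mapping, shown only to make the new fields concrete):

// Hypothetical helper: map a provider name to its Env binding.
function getApiKeyFromEnv(env: Env, provider: string): string | undefined {
  const keyMap: Record<string, keyof Env> = {
    Google: 'GOOGLE_GENERATIVE_AI_API_KEY',
    Mistral: 'MISTRAL_API_KEY',
    xAI: 'XAI_API_KEY',
  };

  const envKey = keyMap[provider] as keyof Env | undefined;

  return envKey ? env[envKey] : undefined;
}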