codacus committed · Commit 6494f5a · unverified · Parent: 389eedc

fix: updated logger and model caching minor bugfix #release (#895)

* fix: updated logger and model caching

* usage token stream issue fix

* minor changes

* updated starter template handling to fix the app title

* starter template bugfix

* fixed hydration errors and raw logs

* removed raw log

* made auto select template false by default

* cleaner logs; updated logic to call dynamicModels only if the model is not found in static models

* updated starter template instructions

* improved browser console logging for Firefox

* fixed provider icons

app/components/chat/BaseChat.tsx CHANGED
@@ -168,30 +168,32 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
   }, []);
 
   useEffect(() => {
-    const providerSettings = getProviderSettings();
-    let parsedApiKeys: Record<string, string> | undefined = {};
+    if (typeof window !== 'undefined') {
+      const providerSettings = getProviderSettings();
+      let parsedApiKeys: Record<string, string> | undefined = {};
 
-    try {
-      parsedApiKeys = getApiKeysFromCookies();
-      setApiKeys(parsedApiKeys);
-    } catch (error) {
-      console.error('Error loading API keys from cookies:', error);
+      try {
+        parsedApiKeys = getApiKeysFromCookies();
+        setApiKeys(parsedApiKeys);
+      } catch (error) {
+        console.error('Error loading API keys from cookies:', error);
 
-      // Clear invalid cookie data
-      Cookies.remove('apiKeys');
-    }
-    setIsModelLoading('all');
-    initializeModelList({ apiKeys: parsedApiKeys, providerSettings })
-      .then((modelList) => {
-        console.log('Model List: ', modelList);
-        setModelList(modelList);
-      })
-      .catch((error) => {
-        console.error('Error initializing model list:', error);
-      })
-      .finally(() => {
-        setIsModelLoading(undefined);
-      });
+        // Clear invalid cookie data
+        Cookies.remove('apiKeys');
+      }
+      setIsModelLoading('all');
+      initializeModelList({ apiKeys: parsedApiKeys, providerSettings })
+        .then((modelList) => {
+          // console.log('Model List: ', modelList);
+          setModelList(modelList);
+        })
+        .catch((error) => {
+          console.error('Error initializing model list:', error);
+        })
+        .finally(() => {
+          setIsModelLoading(undefined);
+        });
+    }
   }, [providerList]);
 
   const onApiKeysChange = async (providerName: string, apiKey: string) => {
@@ -401,28 +403,32 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
   <rect className={classNames(styles.PromptShine)} x="48" y="24" width="70" height="1"></rect>
   </svg>
   <div>
-    <div className={isModelSettingsCollapsed ? 'hidden' : ''}>
-      <ModelSelector
-        key={provider?.name + ':' + modelList.length}
-        model={model}
-        setModel={setModel}
-        modelList={modelList}
-        provider={provider}
-        setProvider={setProvider}
-        providerList={providerList || (PROVIDER_LIST as ProviderInfo[])}
-        apiKeys={apiKeys}
-        modelLoading={isModelLoading}
-      />
-      {(providerList || []).length > 0 && provider && (
-        <APIKeyManager
-          provider={provider}
-          apiKey={apiKeys[provider.name] || ''}
-          setApiKey={(key) => {
-            onApiKeysChange(provider.name, key);
-          }}
-        />
-      )}
-    </div>
+    <ClientOnly>
+      {() => (
+        <div className={isModelSettingsCollapsed ? 'hidden' : ''}>
+          <ModelSelector
+            key={provider?.name + ':' + modelList.length}
+            model={model}
+            setModel={setModel}
+            modelList={modelList}
+            provider={provider}
+            setProvider={setProvider}
+            providerList={providerList || (PROVIDER_LIST as ProviderInfo[])}
+            apiKeys={apiKeys}
+            modelLoading={isModelLoading}
+          />
+          {(providerList || []).length > 0 && provider && (
+            <APIKeyManager
+              provider={provider}
+              apiKey={apiKeys[provider.name] || ''}
+              setApiKey={(key) => {
+                onApiKeysChange(provider.name, key);
+              }}
+            />
+          )}
+        </div>
+      )}
+    </ClientOnly>
   </div>
   <FilePreview
     files={uploadedFiles}
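
Note: the hydration fixes here follow the usual Remix pattern: browser state (cookies) is read only inside a client-side effect, and markup that differs between server and client renders inside `<ClientOnly>`. A minimal sketch of the guard, reusing the same `getApiKeysFromCookies`/`setApiKeys` helpers as in the diff above:

```ts
useEffect(() => {
  // useEffect already runs only in the browser; the explicit window check is
  // defensive, keeping the cookie read from executing in any non-DOM runtime
  // so server-rendered HTML never depends on browser-only state.
  if (typeof window !== 'undefined') {
    const keys = getApiKeysFromCookies(); // reads document.cookie
    setApiKeys(keys);
  }
}, []);
```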
app/components/chat/Chat.client.tsx CHANGED
@@ -168,7 +168,8 @@ export const ChatImpl = memo(
   });
   useEffect(() => {
     const prompt = searchParams.get('prompt');
-    console.log(prompt, searchParams, model, provider);
+
+    // console.log(prompt, searchParams, model, provider);
 
     if (prompt) {
       setSearchParams({});
@@ -289,14 +290,14 @@ export const ChatImpl = memo(
 
     // reload();
 
-    const template = await selectStarterTemplate({
+    const { template, title } = await selectStarterTemplate({
       message: messageInput,
       model,
       provider,
     });
 
     if (template !== 'blank') {
-      const temResp = await getTemplates(template);
+      const temResp = await getTemplates(template, title);
 
       if (temResp) {
        const { assistantMessage, userMessage } = temResp;
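
`selectStarterTemplate` now resolves a project title alongside the template name (see the prompt changes in app/utils/selectStarterTemplate.ts at the end of this diff), and the caller forwards that title to `getTemplates`. The assumed return shape, inferred only from the destructuring above (this type is not declared in the diff itself):

```ts
// Hypothetical shape for illustration.
interface StarterSelection {
  template: string; // e.g. 'react-basic-starter', or 'blank' for no template
  title: string;    // human-readable app title, used to fix the app-title bug
}
```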
app/components/settings/providers/ProvidersTab.tsx CHANGED
@@ -6,9 +6,10 @@ import type { IProviderConfig } from '~/types/model';
 import { logStore } from '~/lib/stores/logs';
 
 // Import a default fallback icon
-import DefaultIcon from '/icons/Default.svg'; // Adjust the path as necessary
 import { providerBaseUrlEnvKeys } from '~/utils/constants';
 
+const DefaultIcon = '/icons/Default.svg'; // Adjust the path as necessary
+
 export default function ProvidersTab() {
   const { providers, updateProviderSettings, isLocalModel } = useSettings();
   const [filteredProviders, setFilteredProviders] = useState<IProviderConfig[]>([]);
app/entry.server.tsx CHANGED
@@ -5,7 +5,6 @@ import { renderToReadableStream } from 'react-dom/server';
 import { renderHeadToString } from 'remix-island';
 import { Head } from './root';
 import { themeStore } from '~/lib/stores/theme';
-import { initializeModelList } from '~/utils/constants';
 
 export default async function handleRequest(
   request: Request,
@@ -14,7 +13,7 @@ export default async function handleRequest(
   remixContext: EntryContext,
   _loadContext: AppLoadContext,
 ) {
-  await initializeModelList({});
+  // await initializeModelList({});
 
   const readable = await renderToReadableStream(<RemixServer context={remixContext} url={request.url} />, {
     signal: request.signal,
app/lib/.server/llm/stream-text.ts CHANGED
@@ -4,7 +4,6 @@ import { getSystemPrompt } from '~/lib/common/prompts/prompts';
 import {
   DEFAULT_MODEL,
   DEFAULT_PROVIDER,
-  getModelList,
   MODEL_REGEX,
   MODIFICATIONS_TAG_NAME,
   PROVIDER_LIST,
@@ -15,6 +14,8 @@ import ignore from 'ignore';
 import type { IProviderSetting } from '~/types/model';
 import { PromptLibrary } from '~/lib/common/prompt-library';
 import { allowedHTMLElements } from '~/utils/markdown';
+import { LLMManager } from '~/lib/modules/llm/manager';
+import { createScopedLogger } from '~/utils/logger';
 
 interface ToolResult<Name extends string, Args, Result> {
   toolCallId: string;
@@ -142,6 +143,8 @@ function extractPropertiesFromMessage(message: Message): { model: string; provider: string; content: string } {
   return { model, provider, content: cleanedContent };
 }
 
+const logger = createScopedLogger('stream-text');
+
 export async function streamText(props: {
   messages: Messages;
   env: Env;
@@ -158,15 +161,10 @@ export async function streamText(props: {
 
   let currentModel = DEFAULT_MODEL;
   let currentProvider = DEFAULT_PROVIDER.name;
-  const MODEL_LIST = await getModelList({ apiKeys, providerSettings, serverEnv: serverEnv as any });
   const processedMessages = messages.map((message) => {
     if (message.role === 'user') {
       const { model, provider, content } = extractPropertiesFromMessage(message);
-
-      if (MODEL_LIST.find((m) => m.name === model)) {
-        currentModel = model;
-      }
-
+      currentModel = model;
       currentProvider = provider;
 
       return { ...message, content };
@@ -183,11 +181,36 @@ export async function streamText(props: {
     return message;
   });
 
-  const modelDetails = MODEL_LIST.find((m) => m.name === currentModel);
-
-  const dynamicMaxTokens = modelDetails && modelDetails.maxTokenAllowed ? modelDetails.maxTokenAllowed : MAX_TOKENS;
-
-  const provider = PROVIDER_LIST.find((p) => p.name === currentProvider) || DEFAULT_PROVIDER;
+  const provider = PROVIDER_LIST.find((p) => p.name === currentProvider) || DEFAULT_PROVIDER;
+  const staticModels = LLMManager.getInstance().getStaticModelListFromProvider(provider);
+  let modelDetails = staticModels.find((m) => m.name === currentModel);
+
+  if (!modelDetails) {
+    const modelsList = [
+      ...(provider.staticModels || []),
+      ...(await LLMManager.getInstance().getModelListFromProvider(provider, {
+        apiKeys,
+        providerSettings,
+        serverEnv: serverEnv as any,
+      })),
+    ];
+
+    if (!modelsList.length) {
+      throw new Error(`No models found for provider ${provider.name}`);
+    }
+
+    modelDetails = modelsList.find((m) => m.name === currentModel);
+
+    if (!modelDetails) {
+      // Fallback to first model
+      logger.warn(
+        `MODEL [${currentModel}] not found in provider [${provider.name}]. Falling back to first model. ${modelsList[0].name}`,
+      );
+      modelDetails = modelsList[0];
+    }
+  }
+
+  const dynamicMaxTokens = modelDetails && modelDetails.maxTokenAllowed ? modelDetails.maxTokenAllowed : MAX_TOKENS;
 
   let systemPrompt =
     PromptLibrary.getPropmtFromLibrary(promptId || 'default', {
@@ -201,6 +224,8 @@ export async function streamText(props: {
     systemPrompt = `${systemPrompt}\n\n ${codeContext}`;
   }
 
+  logger.info(`Sending llm call to ${provider.name} with model ${modelDetails.name}`);
+
   return _streamText({
     model: provider.getModelInstance({
       model: currentModel,
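
The lookup now tries the provider's static catalog before touching the network, and falls back to a dynamic fetch (served from the provider cache when possible) only if the requested model is unknown. A condensed sketch of the same flow, a mirror of the logic above rather than a separate export in the codebase, assuming `apiKeys`, `providerSettings` and `serverEnv` are in scope as in `streamText`:

```ts
async function resolveModelDetails(provider: BaseProvider, currentModel: string): Promise<ModelInfo> {
  const manager = LLMManager.getInstance();

  // 1. Static models are free to check: no network call, no cache key.
  const fromStatic = manager.getStaticModelListFromProvider(provider).find((m) => m.name === currentModel);

  if (fromStatic) {
    return fromStatic;
  }

  // 2. Dynamic models are fetched (or read from cachedDynamicModels) only on a miss.
  const modelsList = await manager.getModelListFromProvider(provider, { apiKeys, providerSettings, serverEnv });

  // 3. Unknown model: degrade to the provider's first model (logged as a warning above).
  return modelsList.find((m) => m.name === currentModel) ?? modelsList[0];
}
```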
app/lib/modules/llm/base-provider.ts CHANGED
@@ -8,6 +8,10 @@ export abstract class BaseProvider implements ProviderInfo {
   abstract name: string;
   abstract staticModels: ModelInfo[];
   abstract config: ProviderConfig;
+  cachedDynamicModels?: {
+    cacheId: string;
+    models: ModelInfo[];
+  };
 
   getApiKeyLink?: string;
   labelForGetApiKey?: string;
@@ -49,6 +53,54 @@ export abstract class BaseProvider implements ProviderInfo {
       apiKey,
     };
   }
+  getModelsFromCache(options: {
+    apiKeys?: Record<string, string>;
+    providerSettings?: Record<string, IProviderSetting>;
+    serverEnv?: Record<string, string>;
+  }): ModelInfo[] | null {
+    if (!this.cachedDynamicModels) {
+      // console.log('no dynamic models', this.name);
+      return null;
+    }
+
+    const cacheKey = this.cachedDynamicModels.cacheId;
+    const generatedCacheKey = this.getDynamicModelsCacheKey(options);
+
+    if (cacheKey !== generatedCacheKey) {
+      // console.log('cache key mismatch', this.name, cacheKey, generatedCacheKey);
+      this.cachedDynamicModels = undefined;
+      return null;
+    }
+
+    return this.cachedDynamicModels.models;
+  }
+  getDynamicModelsCacheKey(options: {
+    apiKeys?: Record<string, string>;
+    providerSettings?: Record<string, IProviderSetting>;
+    serverEnv?: Record<string, string>;
+  }) {
+    return JSON.stringify({
+      apiKeys: options.apiKeys?.[this.name],
+      providerSettings: options.providerSettings?.[this.name],
+      serverEnv: options.serverEnv,
+    });
+  }
+  storeDynamicModels(
+    options: {
+      apiKeys?: Record<string, string>;
+      providerSettings?: Record<string, IProviderSetting>;
+      serverEnv?: Record<string, string>;
+    },
+    models: ModelInfo[],
+  ) {
+    const cacheId = this.getDynamicModelsCacheKey(options);
+
+    // console.log('caching dynamic models', this.name, cacheId);
+    this.cachedDynamicModels = {
+      cacheId,
+      models,
+    };
+  }
 
   // Declare the optional getDynamicModels method
   getDynamicModels?(
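
The cache key is a plain `JSON.stringify` snapshot of exactly the inputs that can change this provider's dynamic list: its own API key, its own settings, and the server env. Anything else (another provider's key, for instance) leaves the key unchanged. A small illustration, assuming a provider instance named `together` whose `name` is `'Together'`:

```ts
// Same inputs -> same cacheId -> cache hit.
together.storeDynamicModels({ apiKeys: { Together: 'key-1' } }, models);
together.getModelsFromCache({ apiKeys: { Together: 'key-1' } }); // -> models

// Another provider's key is not part of this provider's cacheId, still a hit.
together.getModelsFromCache({ apiKeys: { Together: 'key-1', OpenAI: 'other' } }); // -> models

// Rotating this provider's key changes the snapshot: miss, and the stale entry is dropped.
together.getModelsFromCache({ apiKeys: { Together: 'key-2' } }); // -> null
```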
app/lib/modules/llm/manager.ts CHANGED
@@ -2,7 +2,9 @@ import type { IProviderSetting } from '~/types/model';
 import { BaseProvider } from './base-provider';
 import type { ModelInfo, ProviderInfo } from './types';
 import * as providers from './registry';
+import { createScopedLogger } from '~/utils/logger';
 
+const logger = createScopedLogger('LLMManager');
 export class LLMManager {
   private static _instance: LLMManager;
   private _providers: Map<string, BaseProvider> = new Map();
@@ -40,22 +42,22 @@ export class LLMManager {
         try {
           this.registerProvider(provider);
         } catch (error: any) {
-          console.log('Failed To Register Provider: ', provider.name, 'error:', error.message);
+          logger.warn('Failed To Register Provider: ', provider.name, 'error:', error.message);
         }
       }
     }
   } catch (error) {
-    console.error('Error registering providers:', error);
+    logger.error('Error registering providers:', error);
   }
 }
 
 registerProvider(provider: BaseProvider) {
   if (this._providers.has(provider.name)) {
-    console.warn(`Provider ${provider.name} is already registered. Skipping.`);
+    logger.warn(`Provider ${provider.name} is already registered. Skipping.`);
     return;
   }
 
-  console.log('Registering Provider: ', provider.name);
+  logger.info('Registering Provider: ', provider.name);
   this._providers.set(provider.name, provider);
   this._modelList = [...this._modelList, ...provider.staticModels];
 }
@@ -93,12 +95,28 @@ export class LLMManager {
       (provider): provider is BaseProvider & Required<Pick<ProviderInfo, 'getDynamicModels'>> =>
         !!provider.getDynamicModels,
     )
-    .map((provider) =>
-      provider.getDynamicModels(apiKeys, providerSettings?.[provider.name], serverEnv).catch((err) => {
-        console.error(`Error getting dynamic models ${provider.name} :`, err);
-        return [];
-      }),
-    ),
+    .map(async (provider) => {
+      const cachedModels = provider.getModelsFromCache(options);
+
+      if (cachedModels) {
+        return cachedModels;
+      }
+
+      const dynamicModels = await provider
+        .getDynamicModels(apiKeys, providerSettings?.[provider.name], serverEnv)
+        .then((models) => {
+          logger.info(`Caching ${models.length} dynamic models for ${provider.name}`);
+          provider.storeDynamicModels(options, models);
+
+          return models;
+        })
+        .catch((err) => {
+          logger.error(`Error getting dynamic models ${provider.name} :`, err);
+          return [];
+        });
+
+      return dynamicModels;
+    }),
   );
 
   // Combine static and dynamic models
@@ -110,6 +128,68 @@ export class LLMManager {
 
   return modelList;
 }
+getStaticModelList() {
+  return [...this._providers.values()].flatMap((p) => p.staticModels || []);
+}
+async getModelListFromProvider(
+  providerArg: BaseProvider,
+  options: {
+    apiKeys?: Record<string, string>;
+    providerSettings?: Record<string, IProviderSetting>;
+    serverEnv?: Record<string, string>;
+  },
+): Promise<ModelInfo[]> {
+  const provider = this._providers.get(providerArg.name);
+
+  if (!provider) {
+    throw new Error(`Provider ${providerArg.name} not found`);
+  }
+
+  const staticModels = provider.staticModels || [];
+
+  if (!provider.getDynamicModels) {
+    return staticModels;
+  }
+
+  const { apiKeys, providerSettings, serverEnv } = options;
+
+  const cachedModels = provider.getModelsFromCache({
+    apiKeys,
+    providerSettings,
+    serverEnv,
+  });
+
+  if (cachedModels) {
+    logger.info(`Found ${cachedModels.length} cached models for ${provider.name}`);
+    return [...cachedModels, ...staticModels];
+  }
+
+  logger.info(`Getting dynamic models for ${provider.name}`);
+
+  const dynamicModels = await provider
+    .getDynamicModels?.(apiKeys, providerSettings?.[provider.name], serverEnv)
+    .then((models) => {
+      logger.info(`Got ${models.length} dynamic models for ${provider.name}`);
+      provider.storeDynamicModels(options, models);
+
+      return models;
+    })
+    .catch((err) => {
+      logger.error(`Error getting dynamic models ${provider.name} :`, err);
+      return [];
+    });
+
+  return [...dynamicModels, ...staticModels];
+}
+getStaticModelListFromProvider(providerArg: BaseProvider) {
+  const provider = this._providers.get(providerArg.name);
+
+  if (!provider) {
+    throw new Error(`Provider ${providerArg.name} not found`);
+  }
+
+  return [...(provider.staticModels || [])];
+}
 
 getDefaultProvider(): BaseProvider {
   const firstProvider = this._providers.values().next().value;
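
The new per-provider entry point lets `stream-text.ts` refresh a single provider instead of rebuilding the whole model list; a second call with identical options is answered from `cachedDynamicModels`. A sketch of the call pattern, assuming `apiKeys`, `providerSettings` and `serverEnv` are in scope as in `streamText`:

```ts
const manager = LLMManager.getInstance();
const provider = PROVIDER_LIST.find((p) => p.name === 'Ollama') || DEFAULT_PROVIDER;

// First call fetches dynamic models and stores them under the options-derived cacheId.
const models = await manager.getModelListFromProvider(provider, {
  apiKeys,          // keyed by provider name
  providerSettings, // per-provider overrides such as custom base URLs
  serverEnv,        // included wholesale in the cache key
});

// Same options again: served from the cache, logged as "Found N cached models for Ollama".
const cached = await manager.getModelListFromProvider(provider, { apiKeys, providerSettings, serverEnv });
```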
app/lib/modules/llm/providers/huggingface.ts CHANGED
@@ -25,6 +25,30 @@ export default class HuggingFaceProvider extends BaseProvider {
     provider: 'HuggingFace',
     maxTokenAllowed: 8000,
   },
+  {
+    name: 'codellama/CodeLlama-34b-Instruct-hf',
+    label: 'CodeLlama-34b-Instruct (HuggingFace)',
+    provider: 'HuggingFace',
+    maxTokenAllowed: 8000,
+  },
+  {
+    name: 'NousResearch/Hermes-3-Llama-3.1-8B',
+    label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
+    provider: 'HuggingFace',
+    maxTokenAllowed: 8000,
+  },
+  {
+    name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
+    label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
+    provider: 'HuggingFace',
+    maxTokenAllowed: 8000,
+  },
+  {
+    name: 'Qwen/Qwen2.5-72B-Instruct',
+    label: 'Qwen2.5-72B-Instruct (HuggingFace)',
+    provider: 'HuggingFace',
+    maxTokenAllowed: 8000,
+  },
   {
     name: 'meta-llama/Llama-3.1-70B-Instruct',
     label: 'Llama-3.1-70B-Instruct (HuggingFace)',
@@ -37,6 +61,24 @@ export default class HuggingFaceProvider extends BaseProvider {
     provider: 'HuggingFace',
     maxTokenAllowed: 8000,
   },
+  {
+    name: '01-ai/Yi-1.5-34B-Chat',
+    label: 'Yi-1.5-34B-Chat (HuggingFace)',
+    provider: 'HuggingFace',
+    maxTokenAllowed: 8000,
+  },
+  {
+    name: 'codellama/CodeLlama-34b-Instruct-hf',
+    label: 'CodeLlama-34b-Instruct (HuggingFace)',
+    provider: 'HuggingFace',
+    maxTokenAllowed: 8000,
+  },
+  {
+    name: 'NousResearch/Hermes-3-Llama-3.1-8B',
+    label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
+    provider: 'HuggingFace',
+    maxTokenAllowed: 8000,
+  },
 ];
 
 getModelInstance(options: {
app/lib/modules/llm/providers/hyperbolic.ts CHANGED
@@ -50,40 +50,35 @@ export default class HyperbolicProvider extends BaseProvider {
   settings?: IProviderSetting,
   serverEnv: Record<string, string> = {},
 ): Promise<ModelInfo[]> {
-  try {
-    const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
-      apiKeys,
-      providerSettings: settings,
-      serverEnv,
-      defaultBaseUrlKey: '',
-      defaultApiTokenKey: 'HYPERBOLIC_API_KEY',
-    });
-    const baseUrl = fetchBaseUrl || 'https://api.hyperbolic.xyz/v1';
+  const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
+    apiKeys,
+    providerSettings: settings,
+    serverEnv,
+    defaultBaseUrlKey: '',
+    defaultApiTokenKey: 'HYPERBOLIC_API_KEY',
+  });
+  const baseUrl = fetchBaseUrl || 'https://api.hyperbolic.xyz/v1';
 
-    if (!baseUrl || !apiKey) {
-      return [];
-    }
+  if (!apiKey) {
+    throw `Missing Api Key configuration for ${this.name} provider`;
+  }
 
-    const response = await fetch(`${baseUrl}/models`, {
-      headers: {
-        Authorization: `Bearer ${apiKey}`,
-      },
-    });
+  const response = await fetch(`${baseUrl}/models`, {
+    headers: {
+      Authorization: `Bearer ${apiKey}`,
+    },
+  });
 
-    const res = (await response.json()) as any;
+  const res = (await response.json()) as any;
 
-    const data = res.data.filter((model: any) => model.object === 'model' && model.supports_chat);
+  const data = res.data.filter((model: any) => model.object === 'model' && model.supports_chat);
 
-    return data.map((m: any) => ({
-      name: m.id,
-      label: `${m.id} - context ${m.context_length ? Math.floor(m.context_length / 1000) + 'k' : 'N/A'}`,
-      provider: this.name,
-      maxTokenAllowed: m.context_length || 8000,
-    }));
-  } catch (error: any) {
-    console.error('Error getting Hyperbolic models:', error.message);
-    return [];
-  }
+  return data.map((m: any) => ({
+    name: m.id,
+    label: `${m.id} - context ${m.context_length ? Math.floor(m.context_length / 1000) + 'k' : 'N/A'}`,
+    provider: this.name,
+    maxTokenAllowed: m.context_length || 8000,
+  }));
 }
 
 getModelInstance(options: {
@@ -103,8 +98,7 @@ export default class HyperbolicProvider extends BaseProvider {
   });
 
   if (!apiKey) {
-    console.log(`Missing configuration for ${this.name} provider`);
-    throw new Error(`Missing configuration for ${this.name} provider`);
+    throw `Missing Api Key configuration for ${this.name} provider`;
   }
 
   const openai = createOpenAI({
app/lib/modules/llm/providers/lmstudio.ts CHANGED
@@ -22,33 +22,27 @@ export default class LMStudioProvider extends BaseProvider {
   settings?: IProviderSetting,
   serverEnv: Record<string, string> = {},
 ): Promise<ModelInfo[]> {
-  try {
-    const { baseUrl } = this.getProviderBaseUrlAndKey({
-      apiKeys,
-      providerSettings: settings,
-      serverEnv,
-      defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
-      defaultApiTokenKey: '',
-    });
-
-    if (!baseUrl) {
-      return [];
-    }
-
-    const response = await fetch(`${baseUrl}/v1/models`);
-    const data = (await response.json()) as { data: Array<{ id: string }> };
-
-    return data.data.map((model) => ({
-      name: model.id,
-      label: model.id,
-      provider: this.name,
-      maxTokenAllowed: 8000,
-    }));
-  } catch (error: any) {
-    console.log('Error getting LMStudio models:', error.message);
-
-    return [];
-  }
+  const { baseUrl } = this.getProviderBaseUrlAndKey({
+    apiKeys,
+    providerSettings: settings,
+    serverEnv,
+    defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
+    defaultApiTokenKey: '',
+  });
+
+  if (!baseUrl) {
+    return [];
+  }
+
+  const response = await fetch(`${baseUrl}/v1/models`);
+  const data = (await response.json()) as { data: Array<{ id: string }> };
+
+  return data.data.map((model) => ({
+    name: model.id,
+    label: model.id,
+    provider: this.name,
+    maxTokenAllowed: 8000,
+  }));
 }
 getModelInstance: (options: {
   model: string;
app/lib/modules/llm/providers/ollama.ts CHANGED
@@ -45,34 +45,29 @@ export default class OllamaProvider extends BaseProvider {
   settings?: IProviderSetting,
   serverEnv: Record<string, string> = {},
 ): Promise<ModelInfo[]> {
-  try {
-    const { baseUrl } = this.getProviderBaseUrlAndKey({
-      apiKeys,
-      providerSettings: settings,
-      serverEnv,
-      defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
-      defaultApiTokenKey: '',
-    });
-
-    if (!baseUrl) {
-      return [];
-    }
-
-    const response = await fetch(`${baseUrl}/api/tags`);
-    const data = (await response.json()) as OllamaApiResponse;
-
-    // console.log({ ollamamodels: data.models });
-
-    return data.models.map((model: OllamaModel) => ({
-      name: model.name,
-      label: `${model.name} (${model.details.parameter_size})`,
-      provider: this.name,
-      maxTokenAllowed: 8000,
-    }));
-  } catch (e) {
-    console.error('Failed to get Ollama models:', e);
-    return [];
-  }
+  const { baseUrl } = this.getProviderBaseUrlAndKey({
+    apiKeys,
+    providerSettings: settings,
+    serverEnv,
+    defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
+    defaultApiTokenKey: '',
+  });
+
+  if (!baseUrl) {
+    return [];
+  }
+
+  const response = await fetch(`${baseUrl}/api/tags`);
+  const data = (await response.json()) as OllamaApiResponse;
+
+  // console.log({ ollamamodels: data.models });
+
+  return data.models.map((model: OllamaModel) => ({
+    name: model.name,
+    label: `${model.name} (${model.details.parameter_size})`,
+    provider: this.name,
+    maxTokenAllowed: 8000,
+  }));
 }
 getModelInstance: (options: {
   model: string;
app/lib/modules/llm/providers/open-router.ts CHANGED
@@ -27,7 +27,6 @@ export default class OpenRouterProvider extends BaseProvider {
   };
 
   staticModels: ModelInfo[] = [
-    { name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
     {
       name: 'anthropic/claude-3.5-sonnet',
       label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',
app/lib/modules/llm/providers/openai-like.ts CHANGED
@@ -19,37 +19,32 @@ export default class OpenAILikeProvider extends BaseProvider {
   settings?: IProviderSetting,
   serverEnv: Record<string, string> = {},
 ): Promise<ModelInfo[]> {
-  try {
-    const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
-      apiKeys,
-      providerSettings: settings,
-      serverEnv,
-      defaultBaseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
-      defaultApiTokenKey: 'OPENAI_LIKE_API_KEY',
-    });
-
-    if (!baseUrl || !apiKey) {
-      return [];
-    }
-
-    const response = await fetch(`${baseUrl}/models`, {
-      headers: {
-        Authorization: `Bearer ${apiKey}`,
-      },
-    });
-
-    const res = (await response.json()) as any;
-
-    return res.data.map((model: any) => ({
-      name: model.id,
-      label: model.id,
-      provider: this.name,
-      maxTokenAllowed: 8000,
-    }));
-  } catch (error) {
-    console.error('Error getting OpenAILike models:', error);
-    return [];
-  }
+  const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
+    apiKeys,
+    providerSettings: settings,
+    serverEnv,
+    defaultBaseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
+    defaultApiTokenKey: 'OPENAI_LIKE_API_KEY',
+  });
+
+  if (!baseUrl || !apiKey) {
+    return [];
+  }
+
+  const response = await fetch(`${baseUrl}/models`, {
+    headers: {
+      Authorization: `Bearer ${apiKey}`,
+    },
+  });
+
+  const res = (await response.json()) as any;
+
+  return res.data.map((model: any) => ({
+    name: model.id,
+    label: model.id,
+    provider: this.name,
+    maxTokenAllowed: 8000,
+  }));
 }
 
 getModelInstance(options: {
app/lib/modules/llm/providers/openai.ts CHANGED
@@ -13,6 +13,7 @@ export default class OpenAIProvider extends BaseProvider {
   };
 
   staticModels: ModelInfo[] = [
+    { name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
     { name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
     { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
     { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },
app/lib/modules/llm/providers/together.ts CHANGED
@@ -38,41 +38,36 @@ export default class TogetherProvider extends BaseProvider {
   settings?: IProviderSetting,
   serverEnv: Record<string, string> = {},
 ): Promise<ModelInfo[]> {
-  try {
-    const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
-      apiKeys,
-      providerSettings: settings,
-      serverEnv,
-      defaultBaseUrlKey: 'TOGETHER_API_BASE_URL',
-      defaultApiTokenKey: 'TOGETHER_API_KEY',
-    });
-    const baseUrl = fetchBaseUrl || 'https://api.together.xyz/v1';
-
-    if (!baseUrl || !apiKey) {
-      return [];
-    }
-
-    // console.log({ baseUrl, apiKey });
-
-    const response = await fetch(`${baseUrl}/models`, {
-      headers: {
-        Authorization: `Bearer ${apiKey}`,
-      },
-    });
-
-    const res = (await response.json()) as any;
-    const data = (res || []).filter((model: any) => model.type === 'chat');
-
-    return data.map((m: any) => ({
-      name: m.id,
-      label: `${m.display_name} - in:$${m.pricing.input.toFixed(2)} out:$${m.pricing.output.toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
-      provider: this.name,
-      maxTokenAllowed: 8000,
-    }));
-  } catch (error: any) {
-    console.error('Error getting Together models:', error.message);
-    return [];
-  }
+  const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
+    apiKeys,
+    providerSettings: settings,
+    serverEnv,
+    defaultBaseUrlKey: 'TOGETHER_API_BASE_URL',
+    defaultApiTokenKey: 'TOGETHER_API_KEY',
+  });
+  const baseUrl = fetchBaseUrl || 'https://api.together.xyz/v1';
+
+  if (!baseUrl || !apiKey) {
+    return [];
+  }
+
+  // console.log({ baseUrl, apiKey });
+
+  const response = await fetch(`${baseUrl}/models`, {
+    headers: {
+      Authorization: `Bearer ${apiKey}`,
+    },
+  });
+
+  const res = (await response.json()) as any;
+  const data = (res || []).filter((model: any) => model.type === 'chat');
+
+  return data.map((m: any) => ({
+    name: m.id,
+    label: `${m.display_name} - in:$${m.pricing.input.toFixed(2)} out:$${m.pricing.output.toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
+    provider: this.name,
+    maxTokenAllowed: 8000,
+  }));
 }
 
 getModelInstance(options: {
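
The Hyperbolic, LMStudio, Ollama, OpenAILike and Together providers all lose their local `try/catch` in this commit. Failures now surface to the single `.catch` in `LLMManager` (see manager.ts above), so each error is logged once through the scoped logger instead of as scattered `console` noise. The centralized handling, reduced to its core:

```ts
// Inside LLMManager: a throwing provider degrades to "no dynamic models",
// and the error is reported exactly once with the provider's name attached.
const dynamicModels = await provider
  .getDynamicModels(apiKeys, providerSettings?.[provider.name], serverEnv)
  .catch((err) => {
    logger.error(`Error getting dynamic models ${provider.name} :`, err);
    return [] as ModelInfo[];
  });
```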
app/lib/runtime/message-parser.ts CHANGED
@@ -55,7 +55,8 @@ interface MessageState {
 function cleanoutMarkdownSyntax(content: string) {
   const codeBlockRegex = /^\s*```\w*\n([\s\S]*?)\n\s*```\s*$/;
   const match = content.match(codeBlockRegex);
-  console.log('matching', !!match, content);
+
+  // console.log('matching', !!match, content);
 
   if (match) {
     return match[1]; // Remove common leading 4-space indent
app/lib/stores/settings.ts CHANGED
@@ -54,5 +54,5 @@ export const promptStore = atom<string>('default');
 
 export const latestBranchStore = atom(false);
 
-export const autoSelectStarterTemplate = atom(true);
+export const autoSelectStarterTemplate = atom(false);
 export const enableContextOptimizationStore = atom(false);
app/routes/api.chat.ts CHANGED
@@ -5,11 +5,14 @@ import { CONTINUE_PROMPT } from '~/lib/common/prompts/prompts';
 import { streamText, type Messages, type StreamingOptions } from '~/lib/.server/llm/stream-text';
 import SwitchableStream from '~/lib/.server/llm/switchable-stream';
 import type { IProviderSetting } from '~/types/model';
+import { createScopedLogger } from '~/utils/logger';
 
 export async function action(args: ActionFunctionArgs) {
   return chatAction(args);
 }
 
+const logger = createScopedLogger('api.chat');
+
 function parseCookies(cookieHeader: string): Record<string, string> {
   const cookies: Record<string, string> = {};
 
@@ -54,7 +57,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
   const options: StreamingOptions = {
     toolChoice: 'none',
     onFinish: async ({ text: content, finishReason, usage }) => {
-      console.log('usage', usage);
+      logger.debug('usage', JSON.stringify(usage));
 
       if (usage) {
         cumulativeUsage.completionTokens += usage.completionTokens || 0;
@@ -63,23 +66,33 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
       }
 
       if (finishReason !== 'length') {
-        return stream
-          .switchSource(
-            createDataStream({
-              async execute(dataStream) {
-                dataStream.writeMessageAnnotation({
-                  type: 'usage',
-                  value: {
-                    completionTokens: cumulativeUsage.completionTokens,
-                    promptTokens: cumulativeUsage.promptTokens,
-                    totalTokens: cumulativeUsage.totalTokens,
-                  },
-                });
-              },
-              onError: (error: any) => `Custom error: ${error.message}`,
-            }),
-          )
-          .then(() => stream.close());
+        const encoder = new TextEncoder();
+        const usageStream = createDataStream({
+          async execute(dataStream) {
+            dataStream.writeMessageAnnotation({
+              type: 'usage',
+              value: {
+                completionTokens: cumulativeUsage.completionTokens,
+                promptTokens: cumulativeUsage.promptTokens,
+                totalTokens: cumulativeUsage.totalTokens,
+              },
+            });
+          },
+          onError: (error: any) => `Custom error: ${error.message}`,
+        }).pipeThrough(
+          new TransformStream({
+            transform: (chunk, controller) => {
+              // Convert the string stream to a byte stream
+              const str = typeof chunk === 'string' ? chunk : JSON.stringify(chunk);
+              controller.enqueue(encoder.encode(str));
+            },
+          }),
+        );
+        await stream.switchSource(usageStream);
+        await new Promise((resolve) => setTimeout(resolve, 0));
+        stream.close();
+
+        return;
       }
 
       if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
@@ -88,7 +101,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
 
       const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
 
-      console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
+      logger.info(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
 
       messages.push({ role: 'assistant', content });
       messages.push({ role: 'user', content: CONTINUE_PROMPT });
@@ -104,7 +117,9 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
     contextOptimization,
   });
 
-  return stream.switchSource(result.toDataStream());
+  stream.switchSource(result.toDataStream());
+
+  return;
 },
 };
 
@@ -128,7 +143,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
   },
   });
 } catch (error: any) {
-  console.error(error);
+  logger.error(error);
 
   if (error.message?.includes('API key')) {
     throw new Response('Invalid or missing API key', {
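
`SwitchableStream` splices byte streams into the HTTP response, while `createDataStream` emits strings, so the usage-annotation stream is encoded chunk by chunk before `switchSource`; this is the "usage token stream issue fix" from the commit message. The conversion in isolation, as a standalone sketch of the same web-streams pattern:

```ts
const encoder = new TextEncoder();

// Lifts a string-producing ReadableStream into a Uint8Array stream so it can
// be concatenated onto the byte-oriented response stream.
function toByteStream(source: ReadableStream<string>): ReadableStream<Uint8Array> {
  return source.pipeThrough(
    new TransformStream<string, Uint8Array>({
      transform(chunk, controller) {
        controller.enqueue(encoder.encode(chunk));
      },
    }),
  );
}
```

The follow-up `await new Promise((resolve) => setTimeout(resolve, 0))` yields one macrotask so the annotation is flushed before `stream.close()` runs.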
app/utils/constants.ts CHANGED
@@ -19,312 +19,6 @@ export const DEFAULT_PROVIDER = llmManager.getDefaultProvider();
 
 let MODEL_LIST = llmManager.getModelList();
 
-/*
- *const PROVIDER_LIST_OLD: ProviderInfo[] = [
- *  {
- *    name: 'Anthropic',
- *    staticModels: [
- *      { name: 'claude-3-5-sonnet-latest', label: 'Claude 3.5 Sonnet (new)', provider: 'Anthropic', maxTokenAllowed: 8000 },
- *      { name: 'claude-3-5-sonnet-20240620', label: 'Claude 3.5 Sonnet (old)', provider: 'Anthropic', maxTokenAllowed: 8000 },
- *      { name: 'claude-3-5-haiku-latest', label: 'Claude 3.5 Haiku (new)', provider: 'Anthropic', maxTokenAllowed: 8000 },
- *      { name: 'claude-3-opus-latest', label: 'Claude 3 Opus', provider: 'Anthropic', maxTokenAllowed: 8000 },
- *      { name: 'claude-3-sonnet-20240229', label: 'Claude 3 Sonnet', provider: 'Anthropic', maxTokenAllowed: 8000 },
- *      { name: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku', provider: 'Anthropic', maxTokenAllowed: 8000 },
- *    ],
- *    getApiKeyLink: 'https://console.anthropic.com/settings/keys',
- *  },
- *  {
- *    name: 'Ollama',
- *    staticModels: [],
- *    getDynamicModels: getOllamaModels,
- *    getApiKeyLink: 'https://ollama.com/download',
- *    labelForGetApiKey: 'Download Ollama',
- *    icon: 'i-ph:cloud-arrow-down',
- *  },
- *  {
- *    name: 'OpenAILike',
- *    staticModels: [],
- *    getDynamicModels: getOpenAILikeModels,
- *  },
- *  {
- *    name: 'Cohere',
- *    staticModels: [
- *      { name: 'command-r-plus-08-2024', label: 'Command R plus Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
- *      { name: 'command-r-08-2024', label: 'Command R Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
- *      { name: 'command-r-plus', label: 'Command R plus', provider: 'Cohere', maxTokenAllowed: 4096 },
- *      { name: 'command-r', label: 'Command R', provider: 'Cohere', maxTokenAllowed: 4096 },
- *      { name: 'command', label: 'Command', provider: 'Cohere', maxTokenAllowed: 4096 },
- *      { name: 'command-nightly', label: 'Command Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
- *      { name: 'command-light', label: 'Command Light', provider: 'Cohere', maxTokenAllowed: 4096 },
- *      { name: 'command-light-nightly', label: 'Command Light Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
- *      { name: 'c4ai-aya-expanse-8b', label: 'c4AI Aya Expanse 8b', provider: 'Cohere', maxTokenAllowed: 4096 },
- *      { name: 'c4ai-aya-expanse-32b', label: 'c4AI Aya Expanse 32b', provider: 'Cohere', maxTokenAllowed: 4096 },
- *    ],
- *    getApiKeyLink: 'https://dashboard.cohere.com/api-keys',
- *  },
- *  {
- *    name: 'OpenRouter',
- *    staticModels: [
- *      { name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
- *      { name: 'anthropic/claude-3.5-sonnet', label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
- *      { name: 'anthropic/claude-3-haiku', label: 'Anthropic: Claude 3 Haiku (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
- *      { name: 'deepseek/deepseek-coder', label: 'Deepseek-Coder V2 236B (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
- *      { name: 'google/gemini-flash-1.5', label: 'Google Gemini Flash 1.5 (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
- *      { name: 'google/gemini-pro-1.5', label: 'Google Gemini Pro 1.5 (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
- *      { name: 'x-ai/grok-beta', label: 'xAI Grok Beta (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
- *      { name: 'mistralai/mistral-nemo', label: 'OpenRouter Mistral Nemo (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
- *      { name: 'qwen/qwen-110b-chat', label: 'OpenRouter Qwen 110b Chat (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
- *      { name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 4096 },
- *    ],
- *    getDynamicModels: getOpenRouterModels,
- *    getApiKeyLink: 'https://openrouter.ai/settings/keys',
- *  },
- *  {
- *    name: 'Google',
- *    staticModels: [
- *      { name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google', maxTokenAllowed: 8192 },
- *      { name: 'gemini-2.0-flash-exp', label: 'Gemini 2.0 Flash', provider: 'Google', maxTokenAllowed: 8192 },
- *      { name: 'gemini-1.5-flash-002', label: 'Gemini 1.5 Flash-002', provider: 'Google', maxTokenAllowed: 8192 },
- *      { name: 'gemini-1.5-flash-8b', label: 'Gemini 1.5 Flash-8b', provider: 'Google', maxTokenAllowed: 8192 },
- *      { name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google', maxTokenAllowed: 8192 },
- *      { name: 'gemini-1.5-pro-002', label: 'Gemini 1.5 Pro-002', provider: 'Google', maxTokenAllowed: 8192 },
- *      { name: 'gemini-exp-1206', label: 'Gemini exp-1206', provider: 'Google', maxTokenAllowed: 8192 },
- *    ],
- *    getApiKeyLink: 'https://aistudio.google.com/app/apikey',
- *  },
- *  {
- *    name: 'Groq',
- *    staticModels: [
- *      { name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
- *      { name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
- *      { name: 'llama-3.2-90b-vision-preview', label: 'Llama 3.2 90b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
- *      { name: 'llama-3.2-3b-preview', label: 'Llama 3.2 3b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
- *      { name: 'llama-3.2-1b-preview', label: 'Llama 3.2 1b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
- *      { name: 'llama-3.3-70b-versatile', label: 'Llama 3.3 70b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
- *    ],
- *    getApiKeyLink: 'https://console.groq.com/keys',
- *  },
- *  {
- *    name: 'HuggingFace',
- *    staticModels: [
- *      { name: 'Qwen/Qwen2.5-Coder-32B-Instruct', label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
- *      { name: '01-ai/Yi-1.5-34B-Chat', label: 'Yi-1.5-34B-Chat (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
- *      { name: 'codellama/CodeLlama-34b-Instruct-hf', label: 'CodeLlama-34b-Instruct (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
- *      { name: 'NousResearch/Hermes-3-Llama-3.1-8B', label: 'Hermes-3-Llama-3.1-8B (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
- *      { name: 'Qwen/Qwen2.5-Coder-32B-Instruct', label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
- *      { name: 'Qwen/Qwen2.5-72B-Instruct', label: 'Qwen2.5-72B-Instruct (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
- *      { name: 'meta-llama/Llama-3.1-70B-Instruct', label: 'Llama-3.1-70B-Instruct (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
- *      { name: 'meta-llama/Llama-3.1-405B', label: 'Llama-3.1-405B (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
- *      { name: '01-ai/Yi-1.5-34B-Chat', label: 'Yi-1.5-34B-Chat (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
- *      { name: 'codellama/CodeLlama-34b-Instruct-hf', label: 'CodeLlama-34b-Instruct (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
- *      { name: 'NousResearch/Hermes-3-Llama-3.1-8B', label: 'Hermes-3-Llama-3.1-8B (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
- *    ],
- *    getApiKeyLink: 'https://huggingface.co/settings/tokens',
- *  },
- *  {
- *    name: 'OpenAI',
- *    staticModels: [
- *      { name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
- *      { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
- *      { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },
- *      { name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
- *    ],
- *    getApiKeyLink: 'https://platform.openai.com/api-keys',
- *  },
- *  {
- *    name: 'xAI',
- *    staticModels: [{ name: 'grok-beta', label: 'xAI Grok Beta', provider: 'xAI', maxTokenAllowed: 8000 }],
- *    getApiKeyLink: 'https://docs.x.ai/docs/quickstart#creating-an-api-key',
- *  },
- *  {
- *    name: 'Deepseek',
- *    staticModels: [
- *      { name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek', maxTokenAllowed: 8000 },
- *      { name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek', maxTokenAllowed: 8000 },
- *    ],
- *    getApiKeyLink: 'https://platform.deepseek.com/apiKeys',
- *  },
- *  {
- *    name: 'Mistral',
- *    staticModels: [
- *      { name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral', maxTokenAllowed: 8000 },
- *      { name: 'open-mixtral-8x7b', label: 'Mistral 8x7B', provider: 'Mistral', maxTokenAllowed: 8000 },
- *      { name: 'open-mixtral-8x22b', label: 'Mistral 8x22B', provider: 'Mistral', maxTokenAllowed: 8000 },
- *      { name: 'open-codestral-mamba', label: 'Codestral Mamba', provider: 'Mistral', maxTokenAllowed: 8000 },
- *      { name: 'open-mistral-nemo', label: 'Mistral Nemo', provider: 'Mistral', maxTokenAllowed: 8000 },
- *      { name: 'ministral-8b-latest', label: 'Mistral 8B', provider: 'Mistral', maxTokenAllowed: 8000 },
- *      { name: 'mistral-small-latest', label: 'Mistral Small', provider: 'Mistral', maxTokenAllowed: 8000 },
- *      { name: 'codestral-latest', label: 'Codestral', provider: 'Mistral', maxTokenAllowed: 8000 },
- *      { name: 'mistral-large-latest', label: 'Mistral Large Latest', provider: 'Mistral', maxTokenAllowed: 8000 },
- *    ],
- *    getApiKeyLink: 'https://console.mistral.ai/api-keys/',
- *  },
- *  {
- *    name: 'LMStudio',
- *    staticModels: [],
- *    getDynamicModels: getLMStudioModels,
- *    getApiKeyLink: 'https://lmstudio.ai/',
- *    labelForGetApiKey: 'Get LMStudio',
- *    icon: 'i-ph:cloud-arrow-down',
- *  },
- *  {
- *    name: 'Together',
- *    getDynamicModels: getTogetherModels,
- *    staticModels: [
- *      { name: 'Qwen/Qwen2.5-Coder-32B-Instruct', label: 'Qwen/Qwen2.5-Coder-32B-Instruct', provider: 'Together', maxTokenAllowed: 8000 },
- *      { name: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo', label: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo', provider: 'Together', maxTokenAllowed: 8000 },
- *      { name: 'mistralai/Mixtral-8x7B-Instruct-v0.1', label: 'Mixtral 8x7B Instruct', provider: 'Together', maxTokenAllowed: 8192 },
- *    ],
- *    getApiKeyLink: 'https://api.together.xyz/settings/api-keys',
- *  },
- *  {
- *    name: 'Perplexity',
- *    staticModels: [
- *      { name: 'llama-3.1-sonar-small-128k-online', label: 'Sonar Small Online', provider: 'Perplexity', maxTokenAllowed: 8192 },
- *      { name: 'llama-3.1-sonar-large-128k-online', label: 'Sonar Large Online', provider: 'Perplexity', maxTokenAllowed: 8192 },
- *      { name: 'llama-3.1-sonar-huge-128k-online', label: 'Sonar Huge Online', provider: 'Perplexity', maxTokenAllowed: 8192 },
- *    ],
- *    getApiKeyLink: 'https://www.perplexity.ai/settings/api',
- *  },
- *];
- */
-
 const providerBaseUrlEnvKeys: Record<string, { baseUrlKey?: string; apiTokenKey?: string }> = {};
 PROVIDER_LIST.forEach((provider) => {
   providerBaseUrlEnvKeys[provider.name] = {
app/utils/logger.ts CHANGED
@@ -1,4 +1,7 @@
  export type DebugLevel = 'trace' | 'debug' | 'info' | 'warn' | 'error';
+ import { Chalk } from 'chalk';
+
+ const chalk = new Chalk({ level: 3 });
 
  type LoggerFunction = (...messages: any[]) => void;
 
@@ -13,9 +16,6 @@ interface Logger {
 
  let currentLevel: DebugLevel = (import.meta.env.VITE_LOG_LEVEL ?? import.meta.env.DEV) ? 'debug' : 'info';
 
- const isWorker = 'HTMLRewriter' in globalThis;
- const supportsColor = !isWorker;
-
  export const logger: Logger = {
    trace: (...messages: any[]) => log('trace', undefined, messages),
    debug: (...messages: any[]) => log('debug', undefined, messages),
@@ -63,14 +63,8 @@ function log(level: DebugLevel, scope: string | undefined, messages: any[]) {
      return `${acc} ${current}`;
    }, '');
 
-   if (!supportsColor) {
-     console.log(`[${level.toUpperCase()}]`, allMessages);
-
-     return;
-   }
-
    const labelBackgroundColor = getColorForLevel(level);
-   const labelTextColor = level === 'warn' ? 'black' : 'white';
+   const labelTextColor = level === 'warn' ? '#000000' : '#FFFFFF';
 
    const labelStyles = getLabelStyles(labelBackgroundColor, labelTextColor);
    const scopeStyles = getLabelStyles('#77828D', 'white');
@@ -81,7 +75,21 @@ function log(level: DebugLevel, scope: string | undefined, messages: any[]) {
      styles.push('', scopeStyles);
    }
 
-   console.log(`%c${level.toUpperCase()}${scope ? `%c %c${scope}` : ''}`, ...styles, allMessages);
+   let labelText = formatText(` ${level.toUpperCase()} `, labelTextColor, labelBackgroundColor);
+
+   if (scope) {
+     labelText = `${labelText} ${formatText(` ${scope} `, '#FFFFFF', '77828D')}`;
+   }
+
+   if (typeof window !== 'undefined') {
+     console.log(`%c${level.toUpperCase()}${scope ? `%c %c${scope}` : ''}`, ...styles, allMessages);
+   } else {
+     console.log(`${labelText}`, allMessages);
+   }
+ }
+
+ function formatText(text: string, color: string, bg: string) {
+   return chalk.bgHex(bg)(chalk.hex(color)(text));
  }
 
  function getLabelStyles(color: string, textColor: string) {
@@ -104,7 +112,7 @@ function getColorForLevel(level: DebugLevel): string {
      return '#EE4744';
    }
    default: {
-     return 'black';
+     return '#000000';
    }
  }
  }
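Reviewer note on the logger change: instead of sniffing for workers via `HTMLRewriter` and falling back to plain `[LEVEL]` text, the logger now branches on `typeof window`. Browser consoles (including Firefox, per the commit notes) get the `%c` CSS directives, while server and worker output is colored with ANSI escapes produced by chalk. A self-contained TypeScript sketch of that pattern (illustrative only, not the project's exact logger):

// Illustrative only — mirrors the pattern added above, not the project's exact logger.
import { Chalk } from 'chalk';

const chalk = new Chalk({ level: 3 }); // level 3 = truecolor, so hex colors survive

function printLabel(level: string, message: string) {
  if (typeof window !== 'undefined') {
    // Browser consoles style the %c segment with the CSS string that follows it.
    console.log(`%c${level}`, 'background: #77828D; color: white; padding: 2px 4px;', message);
  } else {
    // Node and workers get ANSI escape codes via chalk instead.
    console.log(chalk.bgHex('#77828D')(chalk.hex('#FFFFFF')(` ${level} `)), message);
  }
}

printLabel('INFO', 'model list initialized');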
app/utils/selectStarterTemplate.ts CHANGED
@@ -27,7 +27,7 @@ ${templates
  Response Format:
  <selection>
    <templateName>{selected template name}</templateName>
-   <reasoning>{brief explanation for the choice}</reasoning>
+   <title>{a proper title for the project}</title>
  </selection>
 
  Examples:
@@ -37,7 +37,7 @@ User: I need to build a todo app
  Response:
  <selection>
    <templateName>react-basic-starter</templateName>
-   <reasoning>Simple React setup perfect for building a todo application</reasoning>
+   <title>Simple React todo application</title>
  </selection>
  </example>
 
@@ -46,7 +46,7 @@ User: Write a script to generate numbers from 1 to 100
  Response:
  <selection>
    <templateName>blank</templateName>
-   <reasoning>This is a simple script that doesn't require any template setup</reasoning>
+   <title>script to generate numbers from 1 to 100</title>
  </selection>
  </example>
 
@@ -62,16 +62,17 @@ Important: Provide only the selection tags in your response, no additional text.
 
  const templates: Template[] = STARTER_TEMPLATES.filter((t) => !t.name.includes('shadcn'));
 
- const parseSelectedTemplate = (llmOutput: string): string | null => {
+ const parseSelectedTemplate = (llmOutput: string): { template: string; title: string } | null => {
    try {
      // Extract content between <templateName> tags
      const templateNameMatch = llmOutput.match(/<templateName>(.*?)<\/templateName>/);
+     const titleMatch = llmOutput.match(/<title>(.*?)<\/title>/);
 
      if (!templateNameMatch) {
        return null;
      }
 
-     return templateNameMatch[1].trim();
+     return { template: templateNameMatch[1].trim(), title: titleMatch?.[1].trim() || 'Untitled Project' };
    } catch (error) {
      console.error('Error parsing template selection:', error);
      return null;
@@ -101,7 +102,10 @@ export const selectStarterTemplate = async (options: { message: string; model: s
    } else {
      console.log('No template selected, using blank template');
 
-     return 'blank';
+     return {
+       template: 'blank',
+       title: '',
+     };
    }
  };
 
@@ -181,7 +185,7 @@ const getGitHubRepoContent = async (
    }
  };
 
- export async function getTemplates(templateName: string) {
+ export async function getTemplates(templateName: string, title?: string) {
    const template = STARTER_TEMPLATES.find((t) => t.name == templateName);
 
    if (!template) {
@@ -211,7 +215,7 @@ export async function getTemplates(templateName: string) {
 
    const filesToImport = {
      files: filteredFiles,
-     ignoreFile: filteredFiles,
+     ignoreFile: [] as typeof filteredFiles,
    };
 
    if (templateIgnoreFile) {
@@ -227,7 +231,7 @@
    }
 
    const assistantMessage = `
-   <boltArtifact id="imported-files" title="Importing Starter Files" type="bundled">
+   <boltArtifact id="imported-files" title="${title || 'Importing Starter Files'}" type="bundled">
    ${filesToImport.files
      .map(
        (file) =>
@@ -278,10 +282,16 @@ Any attempt to modify these protected files will result in immediate termination
  If you need to make changes to functionality, create new files instead of modifying the protected ones listed above.
  ---
  `;
-   userMessage += `
+   }
+
+   userMessage += `
+ ---
+ template import is done, and you can now use the imported files,
+ edit only the files that need to be changed, and you can create new files as needed.
+ DO NOT EDIT/WRITE ANY FILES THAT ALREADY EXIST IN THE PROJECT AND DO NOT NEED TO BE MODIFIED
+ ---
  Now that the Template is imported please continue with my original request
  `;
-   }
 
    return {
      assistantMessage,
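Reviewer note on the template-selection change: `parseSelectedTemplate` now returns a `{ template, title }` pair parsed from the `<templateName>` and `<title>` tags, falling back to 'Untitled Project' when no title is present, and `getTemplates` threads that title into the `<boltArtifact>` heading so the app title matches the project. The hunk at line 215 also fixes a bug where `ignoreFile` aliased `filteredFiles` instead of starting empty. A small usage sketch of the parsing logic, where `sample` is made-up LLM output:

// Usage sketch — regexes copied from the diff above; `sample` is invented LLM output.
const sample = `
<selection>
  <templateName>react-basic-starter</templateName>
  <title>Simple React todo application</title>
</selection>
`;

const templateNameMatch = sample.match(/<templateName>(.*?)<\/templateName>/);
const titleMatch = sample.match(/<title>(.*?)<\/title>/);

if (templateNameMatch) {
  const selection = {
    template: templateNameMatch[1].trim(),
    title: titleMatch?.[1].trim() || 'Untitled Project',
  };
  console.log(selection);
  // => { template: 'react-basic-starter', title: 'Simple React todo application' }
}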
package.json CHANGED
@@ -74,6 +74,7 @@
    "@xterm/addon-web-links": "^0.11.0",
    "@xterm/xterm": "^5.5.0",
    "ai": "^4.0.13",
+   "chalk": "^5.4.1",
    "date-fns": "^3.6.0",
    "diff": "^5.2.0",
    "dotenv": "^16.4.7",
pnpm-lock.yaml CHANGED
@@ -143,6 +143,9 @@ importers:
    ai:
      specifier: ^4.0.13
      version: 4.0.18([email protected])([email protected])
+   chalk:
+     specifier: ^5.4.1
+     version: 5.4.1
    date-fns:
      specifier: ^3.6.0
      version: 3.6.0
@@ -2604,8 +2607,8 @@ packages:
    resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==}
    engines: {node: '>=10'}
 
- chalk@5.3.0:
-   resolution: {integrity: sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==}
+ chalk@5.4.1:
+   resolution: {integrity: sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==}
    engines: {node: ^12.17.0 || ^14.13 || >=16.0.0}
 
@@ -8207,7 +8210,7 @@ snapshots:
    ansi-styles: 4.3.0
    supports-color: 7.2.0
 
- chalk@5.3.0: {}
+ chalk@5.4.1: {}
 
@@ -9415,7 +9418,7 @@ snapshots:
    dependencies:
      '@types/diff-match-patch': 1.0.36
-     chalk: 5.3.0
+     chalk: 5.4.1
      diff-match-patch: 1.0.5