Added sanitization for user messages.

Use the regex defined in constants.ts instead of redefining it.
app/components/chat/UserMessage.tsx (CHANGED)

```diff
@@ -1,7 +1,7 @@
 // @ts-nocheck
 // Preventing TS checks with files presented in the video for a better presentation.
 import { modificationsRegex } from '~/utils/diff';
-import { MODEL_REGEX } from '~/utils/constants';
+import { MODEL_REGEX, PROVIDER_REGEX } from '~/utils/constants';
 import { Markdown } from './Markdown';
 
 interface UserMessageProps {
@@ -17,5 +17,5 @@ export function UserMessage({ content }: UserMessageProps) {
 }
 
 function sanitizeUserMessage(content: string) {
-  return content.replace(modificationsRegex, '').replace(MODEL_REGEX, '').trim();
+  return content.replace(modificationsRegex, '').replace(MODEL_REGEX, '').replace(PROVIDER_REGEX, '').trim();
 }
```
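The chat UI prefixes each user message with model/provider headers; the sanitizer strips them before display. A minimal standalone sketch of the new behavior, inlining the two regex constants from app/utils/constants.ts (the real function also applies modificationsRegex from ~/utils/diff, omitted here):

```ts
// Copied from app/utils/constants.ts; modificationsRegex is omitted for brevity.
const MODEL_REGEX = /^\[Model: (.*?)\]\n\n/;
const PROVIDER_REGEX = /\[Provider: (.*?)\]\n\n/;

function sanitizeUserMessage(content: string): string {
  return content.replace(MODEL_REGEX, '').replace(PROVIDER_REGEX, '').trim();
}

const raw = '[Model: gemini-1.5-pro-latest]\n\n[Provider: Google]\n\nBuild a todo app';
console.log(sanitizeUserMessage(raw));
// -> 'Build a todo app'
```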
app/lib/.server/llm/stream-text.ts (CHANGED)

```diff
@@ -4,7 +4,7 @@ import { streamText as _streamText, convertToCoreMessages } from 'ai';
 import { getModel } from '~/lib/.server/llm/model';
 import { MAX_TOKENS } from './constants';
 import { getSystemPrompt } from './prompts';
-import { MODEL_LIST, DEFAULT_MODEL, DEFAULT_PROVIDER } from '~/utils/constants';
+import { MODEL_LIST, DEFAULT_MODEL, DEFAULT_PROVIDER, MODEL_REGEX, PROVIDER_REGEX } from '~/utils/constants';
 
 interface ToolResult<Name extends string, Args, Result> {
   toolCallId: string;
@@ -25,21 +25,18 @@ export type Messages = Message[];
 export type StreamingOptions = Omit<Parameters<typeof _streamText>[0], 'model'>;
 
 function extractPropertiesFromMessage(message: Message): { model: string; provider: string; content: string } {
-  const modelRegex = /^\[Model: (.*?)\]\n\n/;
-  const providerRegex = /\[Provider: (.*?)\]\n\n/;
-
   // Extract model
-  const modelMatch = message.content.match(modelRegex);
+  const modelMatch = message.content.match(MODEL_REGEX);
   const model = modelMatch ? modelMatch[1] : DEFAULT_MODEL;
 
   // Extract provider
-  const providerMatch = message.content.match(providerRegex);
+  const providerMatch = message.content.match(PROVIDER_REGEX);
   const provider = providerMatch ? providerMatch[1] : DEFAULT_PROVIDER;
 
   // Remove model and provider lines from content
   const cleanedContent = message.content
-    .replace(modelRegex, '')
-    .replace(providerRegex, '')
+    .replace(MODEL_REGEX, '')
+    .replace(PROVIDER_REGEX, '')
     .trim();
 
   return { model, provider, content: cleanedContent };
```
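extractPropertiesFromMessage now shares those constants instead of redefining them locally. Note the asymmetry: MODEL_REGEX is anchored with ^ (the model header must open the message), while PROVIDER_REGEX is not, so it matches even though it follows the model header. Messages with no headers fall back to the defaults. A hypothetical standalone version that takes a bare string rather than the ai SDK's Message type:

```ts
const MODEL_REGEX = /^\[Model: (.*?)\]\n\n/;
const PROVIDER_REGEX = /\[Provider: (.*?)\]\n\n/;
const DEFAULT_MODEL = 'claude-3-5-sonnet-20240620';
const DEFAULT_PROVIDER = 'Anthropic';

// Same shape as extractPropertiesFromMessage, minus the Message type.
function extractProperties(content: string) {
  const model = content.match(MODEL_REGEX)?.[1] ?? DEFAULT_MODEL;
  const provider = content.match(PROVIDER_REGEX)?.[1] ?? DEFAULT_PROVIDER;
  const cleaned = content.replace(MODEL_REGEX, '').replace(PROVIDER_REGEX, '').trim();
  return { model, provider, content: cleaned };
}

console.log(extractProperties('[Model: gpt-4]\n\n[Provider: OpenAI]\n\nhi'));
// -> { model: 'gpt-4', provider: 'OpenAI', content: 'hi' }
console.log(extractProperties('hi'));
// -> { model: 'claude-3-5-sonnet-20240620', provider: 'Anthropic', content: 'hi' }
```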
app/utils/constants.ts (CHANGED)

```diff
@@ -4,6 +4,7 @@ export const WORK_DIR_NAME = 'project';
 export const WORK_DIR = `/home/${WORK_DIR_NAME}`;
 export const MODIFICATIONS_TAG_NAME = 'bolt_file_modifications';
 export const MODEL_REGEX = /^\[Model: (.*?)\]\n\n/;
+export const PROVIDER_REGEX = /\[Provider: (.*?)\]\n\n/;
 export const DEFAULT_MODEL = 'claude-3-5-sonnet-20240620';
 export const DEFAULT_PROVIDER = 'Anthropic';
 
@@ -19,7 +20,7 @@ const staticModels: ModelInfo[] = [
   { name: 'qwen/qwen-110b-chat', label: 'OpenRouter Qwen 110b Chat (OpenRouter)', provider: 'OpenRouter' },
   { name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter' },
   { name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google' },
-  { name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google'},
+  { name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google' },
   { name: 'llama-3.1-70b-versatile', label: 'Llama 3.1 70b (Groq)', provider: 'Groq' },
   { name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq' },
   { name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq' },
@@ -32,8 +33,8 @@ const staticModels: ModelInfo[] = [
   { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI' },
   { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI' },
   { name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI' },
-  { name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek'},
-  { name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek'},
+  { name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek' },
+  { name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek' },
   { name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral' },
   { name: 'open-mixtral-8x7b', label: 'Mistral 8x7B', provider: 'Mistral' },
   { name: 'open-mixtral-8x22b', label: 'Mistral 8x22B', provider: 'Mistral' },
@@ -54,11 +55,11 @@ const getOllamaBaseUrl = () => {
     // Frontend always uses localhost
     return defaultBaseUrl;
   }
-
+
   // Backend: Check if we're running in Docker
   const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
-
-  return isDocker
+
+  return isDocker
     ? defaultBaseUrl.replace("localhost", "host.docker.internal")
     : defaultBaseUrl;
 };
```
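The getOllamaBaseUrl hunk above is whitespace churn (each -/+ pair differs only in trailing whitespace), but the logic it touches deserves a gloss: inside a container, localhost resolves to the container itself, so the backend swaps in host.docker.internal. A standalone sketch, assuming Ollama's usual default of http://localhost:11434 (the actual defaultBaseUrl is defined earlier in the file, outside this diff):

```ts
// Sketch of the Docker-aware rewrite in getOllamaBaseUrl. The URL below is
// an assumption; constants.ts defines its own defaultBaseUrl elsewhere.
const defaultBaseUrl = 'http://localhost:11434';

// RUNNING_IN_DOCKER is the env flag the real code checks.
const isDocker = process.env.RUNNING_IN_DOCKER === 'true';

const baseUrl = isDocker
  ? defaultBaseUrl.replace('localhost', 'host.docker.internal')
  : defaultBaseUrl;
// RUNNING_IN_DOCKER=true  -> 'http://host.docker.internal:11434'
// otherwise               -> 'http://localhost:11434'
```

The remaining hunk in constants.ts wraps getOpenAILikeModels in a try/catch so a bad or unreachable endpoint no longer throws during model-list initialization, and includes the fetched models in MODEL_LIST: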
```diff
@@ -80,32 +81,32 @@ async function getOllamaModels(): Promise<ModelInfo[]> {
 }
 
 async function getOpenAILikeModels(): Promise<ModelInfo[]> {
-  const base_url = import.meta.env.OPENAI_LIKE_API_BASE_URL || "";
-  if (!base_url) {
+  try {
+    const base_url = import.meta.env.OPENAI_LIKE_API_BASE_URL || "";
+    if (!base_url) {
      return [];
-  }
-  const api_key = import.meta.env.OPENAI_LIKE_API_KEY ?? "";
-  const response = await fetch(`${base_url}/models`, {
-    headers: {
-      Authorization: `Bearer ${api_key}`,
-    }
-  });
+    }
+    const api_key = import.meta.env.OPENAI_LIKE_API_KEY ?? "";
+    const response = await fetch(`${base_url}/models`, {
+      headers: {
+        Authorization: `Bearer ${api_key}`,
+      }
+    });
     const res = await response.json() as any;
     return res.data.map((model: any) => ({
       name: model.id,
       label: model.id,
       provider: 'OpenAILike',
     }));
+  } catch (e) {
+    return []
+  }
 
 }
 async function initializeModelList(): Promise<void> {
   const ollamaModels = await getOllamaModels();
   const openAiLikeModels = await getOpenAILikeModels();
-  MODEL_LIST = [...ollamaModels, ...staticModels];
+  MODEL_LIST = [...ollamaModels, ...openAiLikeModels, ...staticModels];
 }
 initializeModelList().then();
 export { getOllamaModels, getOpenAILikeModels, initializeModelList };
```
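With the try/catch in place, any failure fetching `${base_url}/models` degrades to an empty list instead of breaking startup. The mapping assumes the OpenAI-style response shape, where models arrive under a data array; a small sketch with an illustrative payload (the model ids are made up):

```ts
// Illustrative response from `${base_url}/models` on an OpenAI-compatible
// server; only the `id` field is read by getOpenAILikeModels.
const exampleResponse = {
  data: [{ id: 'local-llama-3' }, { id: 'local-qwen-72b' }],
};

// Mirrors the res.data.map(...) in the diff above.
const models = exampleResponse.data.map((model) => ({
  name: model.id,
  label: model.id,
  provider: 'OpenAILike',
}));
console.log(models);
// -> [ { name: 'local-llama-3', label: 'local-llama-3', provider: 'OpenAILike' }, ... ]
```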