Merge remote-tracking branch 'upstream/main'
- .env.example +13 -1
- .gitignore +1 -0
- CONTRIBUTING.md +16 -0
- Dockerfile +6 -2
- README.md +14 -7
- app/components/chat/APIKeyManager.tsx +26 -21
- app/components/chat/BaseChat.tsx +20 -19
- app/components/chat/Messages.client.tsx +50 -47
- app/components/header/HeaderActionButtons.client.tsx +4 -1
- app/components/workbench/Workbench.client.tsx +19 -11
- app/lib/.server/llm/api-key.ts +2 -0
- app/lib/.server/llm/model.ts +18 -9
- app/lib/.server/llm/stream-text.ts +16 -25
- app/lib/hooks/index.ts +1 -0
- app/lib/hooks/usePromptEnhancer.ts +19 -18
- app/lib/hooks/useViewport.ts +18 -0
- app/routes/api.enhancer.ts +19 -14
- app/utils/constants.ts +63 -44
- app/utils/types.ts +1 -0
- docker-compose.yaml +2 -0
- package.json +1 -0
- pnpm-lock.yaml +47 -0
.env.example
CHANGED
@@ -49,6 +49,10 @@ OPENAI_LIKE_API_KEY=
 # You only need this environment variable set if you want to use Mistral models
 MISTRAL_API_KEY=
 
+# Get the Cohere Api key by following these instructions -
+# https://dashboard.cohere.com/api-keys
+# You only need this environment variable set if you want to use Cohere models
+COHERE_API_KEY=
 
 # Get LMStudio Base URL from LM Studio Developer Console
 # Make sure to enable CORS
@@ -61,4 +65,12 @@ LMSTUDIO_API_BASE_URL=
 XAI_API_KEY=
 
 # Include this environment variable if you want more logging for debugging locally
-VITE_LOG_LEVEL=debug
+VITE_LOG_LEVEL=debug
+
+# Example Context Values for qwen2.5-coder:32b
+#
+# DEFAULT_NUM_CTX=32768 # Consumes 36GB of VRAM
+# DEFAULT_NUM_CTX=24576 # Consumes 32GB of VRAM
+# DEFAULT_NUM_CTX=12288 # Consumes 26GB of VRAM
+# DEFAULT_NUM_CTX=6144 # Consumes 24GB of VRAM
+DEFAULT_NUM_CTX=
.gitignore
CHANGED
@@ -22,6 +22,7 @@ dist-ssr
 *.sln
 *.sw?
 
+/.history
 /.cache
 /build
 .env.local
CONTRIBUTING.md
CHANGED
@@ -1,4 +1,7 @@
 # Contributing to Bolt.new Fork
+## DEFAULT_NUM_CTX
+
+The `DEFAULT_NUM_CTX` environment variable can be used to limit the maximum number of context values used by the qwen2.5-coder model. For example, to limit the context to 24576 values (which uses 32GB of VRAM), set `DEFAULT_NUM_CTX=24576` in your `.env.local` file.
 
 First off, thank you for considering contributing to Bolt.new! This fork aims to expand the capabilities of the original project by integrating multiple LLM providers and enhancing functionality. Every contribution helps make Bolt.new a better tool for developers worldwide.
 
@@ -81,6 +84,19 @@ ANTHROPIC_API_KEY=XXX
 ```bash
 VITE_LOG_LEVEL=debug
 ```
+
+- Optionally set context size:
+```bash
+DEFAULT_NUM_CTX=32768
+```
+
+Some Example Context Values for the qwen2.5-coder:32b models are.
+
+* DEFAULT_NUM_CTX=32768 - Consumes 36GB of VRAM
+* DEFAULT_NUM_CTX=24576 - Consumes 32GB of VRAM
+* DEFAULT_NUM_CTX=12288 - Consumes 26GB of VRAM
+* DEFAULT_NUM_CTX=6144 - Consumes 24GB of VRAM
+
 **Important**: Never commit your `.env.local` file to version control. It's already included in .gitignore.
 
 ### 🚀 Running the Development Server
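The `DEFAULT_NUM_CTX` wiring above reduces to a single parse-with-fallback. A minimal TypeScript sketch of how the variable is consumed (it mirrors the `app/lib/.server/llm/model.ts` change later in this diff, where the 32768 default comes from):

```ts
// Read DEFAULT_NUM_CTX from the environment, falling back to the full 32k
// context when it is unset. parseInt with an explicit radix avoids surprises.
const DEFAULT_NUM_CTX = process.env.DEFAULT_NUM_CTX
  ? parseInt(process.env.DEFAULT_NUM_CTX, 10)
  : 32768;

console.log(`Ollama will run with numCtx=${DEFAULT_NUM_CTX}`);
```

A lower value trades context length for VRAM, per the table in the `.env.example` hunk above.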
Dockerfile
CHANGED
@@ -26,6 +26,7 @@ ARG OPEN_ROUTER_API_KEY
 ARG GOOGLE_GENERATIVE_AI_API_KEY
 ARG OLLAMA_API_BASE_URL
 ARG VITE_LOG_LEVEL=debug
+ARG DEFAULT_NUM_CTX
 
 ENV WRANGLER_SEND_METRICS=false \
     GROQ_API_KEY=${GROQ_API_KEY} \
@@ -35,7 +36,8 @@ ENV WRANGLER_SEND_METRICS=false \
     OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \
     GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \
     OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \
-    VITE_LOG_LEVEL=${VITE_LOG_LEVEL}
+    VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \
+    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}
 
 # Pre-configure wrangler to disable metrics
 RUN mkdir -p /root/.config/.wrangler && \
@@ -57,6 +59,7 @@ ARG OPEN_ROUTER_API_KEY
 ARG GOOGLE_GENERATIVE_AI_API_KEY
 ARG OLLAMA_API_BASE_URL
 ARG VITE_LOG_LEVEL=debug
+ARG DEFAULT_NUM_CTX
 
 ENV GROQ_API_KEY=${GROQ_API_KEY} \
     HuggingFace_API_KEY=${HuggingFace_API_KEY} \
@@ -65,7 +68,8 @@ ENV GROQ_API_KEY=${GROQ_API_KEY} \
     OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \
     GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \
     OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \
-    VITE_LOG_LEVEL=${VITE_LOG_LEVEL}
+    VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \
+    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}
 
 RUN mkdir -p ${WORKDIR}/run
 CMD pnpm run dev --host
README.md
CHANGED
@@ -1,8 +1,12 @@
 [](https://bolt.new)
 
-# Bolt.new Fork by Cole Medin
+# Bolt.new Fork by Cole Medin - oTToDev
 
-This fork of Bolt.new allows you to choose the LLM that you use for each prompt! Currently, you can use OpenAI, Anthropic, Ollama, OpenRouter, Gemini, or Groq models - and it is easily extended to use any other model supported by the Vercel AI SDK! See the instructions below for running this locally and extending it to include more models.
+This fork of Bolt.new (oTToDev) allows you to choose the LLM that you use for each prompt! Currently, you can use OpenAI, Anthropic, Ollama, OpenRouter, Gemini, LMStudio, Mistral, xAI, HuggingFace, DeepSeek, or Groq models - and it is easily extended to use any other model supported by the Vercel AI SDK! See the instructions below for running this locally and extending it to include more models.
+
+Join the community for oTToDev!
+
+https://thinktank.ottomator.ai
 
 # Requested Additions to this Fork - Feel Free to Contribute!!
 
@@ -20,21 +24,24 @@ This fork of Bolt.new allows you to choose the LLM that you use for each prompt!
 - ✅ Publish projects directly to GitHub (@goncaloalves)
 - ✅ Ability to enter API keys in the UI (@ali00209)
 - ✅ xAI Grok Beta Integration (@milutinke)
+- ✅ LM Studio Integration (@karrot0)
+- ✅ HuggingFace Integration (@ahsan3219)
+- ✅ Bolt terminal to see the output of LLM run commands (@thecodacus)
+- ✅ Streaming of code output (@thecodacus)
+- ✅ Ability to revert code to earlier version (@wonderwhy-er)
 - ⬜ **HIGH PRIORITY** - Prevent Bolt from rewriting files as often (file locking and diffs)
 - ⬜ **HIGH PRIORITY** - Better prompting for smaller LLMs (code window sometimes doesn't start)
-- ⬜ **HIGH PRIORITY** Load local projects into the app
+- ⬜ **HIGH PRIORITY** - Load local projects into the app
 - ⬜ **HIGH PRIORITY** - Attach images to prompts
 - ⬜ **HIGH PRIORITY** - Run agents in the backend as opposed to a single model call
 - ⬜ Mobile friendly
-- ⬜ LM Studio Integration
 - ⬜ Together Integration
 - ⬜ Azure Open AI API Integration
-- ⬜ HuggingFace Integration
 - ⬜ Perplexity Integration
 - ⬜ Vertex AI Integration
-- ⬜ Cohere Integration
+- ✅ Cohere Integration (@hasanraiyan)
+- ✅ Dynamic model max token length (@hasanraiyan)
 - ⬜ Deploy directly to Vercel/Netlify/other similar platforms
-- ⬜ Ability to revert code to earlier version
 - ⬜ Prompt caching
 - ⬜ Better prompt enhancing
 - ⬜ Have LLM plan the project in a MD file for better results/transparency
app/components/chat/APIKeyManager.tsx
CHANGED
@@ -10,11 +10,7 @@ interface APIKeyManagerProps {
   labelForGetApiKey?: string;
 }
 
-export const APIKeyManager: React.FC<APIKeyManagerProps> = ({
-  provider,
-  apiKey,
-  setApiKey,
-}) => {
+export const APIKeyManager: React.FC<APIKeyManagerProps> = ({ provider, apiKey, setApiKey }) => {
   const [isEditing, setIsEditing] = useState(false);
   const [tempKey, setTempKey] = useState(apiKey);
 
@@ -24,15 +20,29 @@ export const APIKeyManager: React.FC<APIKeyManagerProps> = ({
   };
 
   return (
-    <div className="flex items-
+    <div className="flex items-start sm:items-center mt-2 mb-2 flex-col sm:flex-row">
+      <div>
+        <span className="text-sm text-bolt-elements-textSecondary">{provider?.name} API Key:</span>
+        {!isEditing && (
+          <div className="flex items-center mb-4">
+            <span className="flex-1 text-xs text-bolt-elements-textPrimary mr-2">
+              {apiKey ? '••••••••' : 'Not set (will still work if set in .env file)'}
+            </span>
+            <IconButton onClick={() => setIsEditing(true)} title="Edit API Key">
+              <div className="i-ph:pencil-simple" />
+            </IconButton>
+          </div>
+        )}
+      </div>
+
       {isEditing ? (
+        <div className="flex items-center gap-3 mt-2">
           <input
             type="password"
            value={tempKey}
+            placeholder="Your API Key"
            onChange={(e) => setTempKey(e.target.value)}
-            className="flex-1
+            className="flex-1 px-2 py-1 text-xs lg:text-sm rounded border border-bolt-elements-borderColor bg-bolt-elements-prompt-background text-bolt-elements-textPrimary focus:outline-none focus:ring-2 focus:ring-bolt-elements-focus"
          />
          <IconButton onClick={handleSave} title="Save API Key">
            <div className="i-ph:check" />
@@ -40,20 +50,15 @@ export const APIKeyManager: React.FC<APIKeyManagerProps> = ({
          <IconButton onClick={() => setIsEditing(false)} title="Cancel">
            <div className="i-ph:x" />
          </IconButton>
+        </div>
      ) : (
        <>
-          {provider?.getApiKeyLink && <IconButton onClick={() => window.open(provider?.getApiKeyLink)} title="Edit API Key">
-            <span className="mr-2">{provider?.labelForGetApiKey || 'Get API Key'}</span>
-            <div className={provider?.icon || "i-ph:key"} />
-          </IconButton>}
+          {provider?.getApiKeyLink && (
+            <IconButton className="ml-auto" onClick={() => window.open(provider?.getApiKeyLink)} title="Edit API Key">
+              <span className="mr-2 text-xs lg:text-sm">{provider?.labelForGetApiKey || 'Get API Key'}</span>
+              <div className={provider?.icon || 'i-ph:key'} />
+            </IconButton>
+          )}
        </>
      )}
    </div>
app/components/chat/BaseChat.tsx
CHANGED
@@ -29,9 +29,9 @@ const EXAMPLE_PROMPTS = [
 
 const providerList = PROVIDER_LIST;
 
-const ModelSelector = ({ model, setModel, provider, setProvider, modelList, providerList }) => {
+const ModelSelector = ({ model, setModel, provider, setProvider, modelList, providerList, apiKeys }) => {
   return (
-    <div className="mb-2 flex gap-2">
+    <div className="mb-2 flex gap-2 flex-col sm:flex-row">
       <select
         value={provider?.name}
         onChange={(e) => {
@@ -51,8 +51,7 @@ const ModelSelector = ({ model, setModel, provider, setProvider, modelList, prov
         key={provider?.name}
         value={model}
         onChange={(e) => setModel(e.target.value)}
-
-        className="flex-1
+        className="flex-1 p-2 rounded-lg border border-bolt-elements-borderColor bg-bolt-elements-prompt-background text-bolt-elements-textPrimary focus:outline-none focus:ring-2 focus:ring-bolt-elements-focus transition-all lg:max-w-[70%] "
       >
         {[...modelList]
           .filter((e) => e.provider == provider?.name && e.name)
@@ -193,25 +192,25 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
        ref={ref}
        className={classNames(
          styles.BaseChat,
-          'relative flex h-full w-full overflow-hidden bg-bolt-elements-background-depth-1',
+          'relative flex flex-col lg:flex-row h-full w-full overflow-hidden bg-bolt-elements-background-depth-1',
        )}
        data-chat-visible={showChat}
      >
        <ClientOnly>{() => <Menu />}</ClientOnly>
-        <div ref={scrollRef} className="flex overflow-y-auto w-full h-full">
-          <div className={classNames(styles.Chat, 'flex flex-col flex-grow min-w-[var(--chat-min-width)] h-full')}>
+        <div ref={scrollRef} className="flex flex-col lg:flex-row overflow-y-auto w-full h-full">
+          <div className={classNames(styles.Chat, 'flex flex-col flex-grow lg:min-w-[var(--chat-min-width)] h-full')}>
            {!chatStarted && (
-              <div id="intro" className="mt-[26vh] max-w-chat mx-auto text-center">
-                <h1 className="text-6xl font-bold text-bolt-elements-textPrimary mb-4 animate-fade-in">
+              <div id="intro" className="mt-[26vh] max-w-chat mx-auto text-center px-4 lg:px-0">
+                <h1 className="text-3xl lg:text-6xl font-bold text-bolt-elements-textPrimary mb-4 animate-fade-in">
                  Where ideas begin
                </h1>
-                <p className="text-xl mb-8 text-bolt-elements-textSecondary animate-fade-in animation-delay-200">
+                <p className="text-md lg:text-xl mb-8 text-bolt-elements-textSecondary animate-fade-in animation-delay-200">
                  Bring ideas to life in seconds or get help on existing projects.
                </p>
              </div>
            )}
            <div
-              className={classNames('pt-6 px-6', {
+              className={classNames('pt-6 px-2 sm:px-6', {
                'h-full flex flex-col': chatStarted,
              })}
            >
@@ -220,7 +219,7 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
                return chatStarted ? (
                  <Messages
                    ref={messageRef}
-                    className="flex flex-col w-full flex-1 max-w-chat
+                    className="flex flex-col w-full flex-1 max-w-chat pb-6 mx-auto z-1"
                    messages={messages}
                    isStreaming={isStreaming}
                  />
@@ -228,9 +227,12 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
              }}
            </ClientOnly>
            <div
              className={classNames(
-                '
+                ' bg-bolt-elements-background-depth-2 p-3 rounded-lg border border-bolt-elements-borderColor relative w-full max-w-chat mx-auto z-prompt mb-6',
+                {
+                  'sticky bottom-2': chatStarted,
+                },
+              )}
            >
              <ModelSelector
                key={provider?.name + ':' + modelList.length}
@@ -240,7 +242,9 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
                provider={provider}
                setProvider={setProvider}
                providerList={PROVIDER_LIST}
+                apiKeys={apiKeys}
              />
+
              {provider && (
                <APIKeyManager
                  provider={provider}
@@ -248,7 +252,6 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
                  setApiKey={(key) => updateApiKey(provider.name, key)}
                />
              )}
-
              <FilePreview
                files={uploadedFiles}
                imageDataList={imageDataList}
@@ -257,7 +260,6 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
                  setImageDataList?.(imageDataList.filter((_, i) => i !== index));
                }}
              />
-
              <div
                className={classNames(
                  'shadow-lg border border-bolt-elements-borderColor bg-bolt-elements-prompt-background backdrop-filter backdrop-blur-[8px] rounded-lg overflow-hidden transition-all',
@@ -265,7 +267,7 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
              >
                <textarea
                  ref={textareaRef}
-                  className={`w-full pl-4 pt-4 pr-16 focus:outline-none focus:ring-
+                  className={`w-full pl-4 pt-4 pr-16 focus:outline-none focus:ring-0 focus:border-none focus:shadow-none resize-none text-md text-bolt-elements-textPrimary placeholder-bolt-elements-textTertiary bg-transparent transition-all`}
                  onKeyDown={(event) => {
                    if (event.key === 'Enter') {
                      if (event.shiftKey) {
@@ -347,7 +349,6 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
                ) : null}
              </div>
            </div>
-            <div className="bg-bolt-elements-background-depth-1 pb-6">{/* Ghost Element */}</div>
          </div>
        </div>
        {!chatStarted && (
app/components/chat/Messages.client.tsx
CHANGED
@@ -4,7 +4,7 @@ import { classNames } from '~/utils/classNames';
 import { AssistantMessage } from './AssistantMessage';
 import { UserMessage } from './UserMessage';
 import * as Tooltip from '@radix-ui/react-tooltip';
-import { useLocation, useNavigate } from '@remix-run/react';
+import { useLocation } from '@remix-run/react';
 import { db, chatId } from '~/lib/persistence/useChatHistory';
 import { forkChat } from '~/lib/persistence/db';
 import { toast } from 'react-toastify';
@@ -19,7 +19,6 @@ interface MessagesProps {
 export const Messages = React.forwardRef<HTMLDivElement, MessagesProps>((props: MessagesProps, ref) => {
   const { id, isStreaming = false, messages = [] } = props;
   const location = useLocation();
-  const navigate = useNavigate();
 
   const handleRewind = (messageId: string) => {
     const searchParams = new URLSearchParams(location.search);
@@ -69,53 +68,57 @@ export const Messages = React.forwardRef<HTMLDivElement, MessagesProps>((props:
            <div className="grid grid-col-1 w-full">
              {isUserMessage ? <UserMessage content={content} /> : <AssistantMessage content={content} />}
            </div>
            {!isUserMessage && (
+              <div className="flex gap-2 flex-col lg:flex-row">
+                <Tooltip.Root>
+                  <Tooltip.Trigger asChild>
+                    {messageId && (
+                      <button
+                        onClick={() => handleRewind(messageId)}
+                        key="i-ph:arrow-u-up-left"
+                        className={classNames(
+                          'i-ph:arrow-u-up-left',
+                          'text-xl text-bolt-elements-textSecondary hover:text-bolt-elements-textPrimary transition-colors',
+                        )}
+                      />
                    )}
+                  </Tooltip.Trigger>
+                  <Tooltip.Portal>
+                    <Tooltip.Content
+                      className="bg-bolt-elements-tooltip-background text-bolt-elements-textPrimary px-3 py-2 rounded-lg text-sm shadow-lg"
+                      sideOffset={5}
+                      style={{ zIndex: 1000 }}
+                    >
+                      Revert to this message
+                      <Tooltip.Arrow className="fill-bolt-elements-tooltip-background" />
+                    </Tooltip.Content>
+                  </Tooltip.Portal>
+                </Tooltip.Root>
 
+                <Tooltip.Root>
+                  <Tooltip.Trigger asChild>
+                    <button
+                      onClick={() => handleFork(messageId)}
+                      key="i-ph:git-fork"
+                      className={classNames(
+                        'i-ph:git-fork',
+                        'text-xl text-bolt-elements-textSecondary hover:text-bolt-elements-textPrimary transition-colors',
+                      )}
+                    />
+                  </Tooltip.Trigger>
+                  <Tooltip.Portal>
+                    <Tooltip.Content
+                      className="bg-bolt-elements-tooltip-background text-bolt-elements-textPrimary px-3 py-2 rounded-lg text-sm shadow-lg"
+                      sideOffset={5}
+                      style={{ zIndex: 1000 }}
+                    >
+                      Fork chat from this message
+                      <Tooltip.Arrow className="fill-bolt-elements-tooltip-background" />
+                    </Tooltip.Content>
+                  </Tooltip.Portal>
+                </Tooltip.Root>
+              </div>
+            )}
          </div>
        );
      })
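For orientation, the `handleRewind` helper these buttons call is only partially visible above (just the `URLSearchParams` setup), so the sketch below is a hedged guess at the rest: the query-parameter name `rewindTo` is an assumption, not code shown in this diff.

```ts
// Hypothetical completion of handleRewind: encode the target message in the
// URL and reload, so the chat history loader can truncate to that point.
function handleRewind(messageId: string) {
  const searchParams = new URLSearchParams(window.location.search);
  searchParams.set('rewindTo', messageId); // parameter name assumed
  window.location.search = searchParams.toString();
}
```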
app/components/header/HeaderActionButtons.client.tsx
CHANGED
@@ -1,4 +1,5 @@
 import { useStore } from '@nanostores/react';
+import useViewport from '~/lib/hooks';
 import { chatStore } from '~/lib/stores/chat';
 import { workbenchStore } from '~/lib/stores/workbench';
 import { classNames } from '~/utils/classNames';
@@ -9,6 +10,8 @@ export function HeaderActionButtons({}: HeaderActionButtonsProps) {
   const showWorkbench = useStore(workbenchStore.showWorkbench);
   const { showChat } = useStore(chatStore);
 
+  const isSmallViewport = useViewport(1024);
+
   const canHideChat = showWorkbench || !showChat;
 
   return (
@@ -16,7 +19,7 @@ export function HeaderActionButtons({}: HeaderActionButtonsProps) {
     <div className="flex border border-bolt-elements-borderColor rounded-md overflow-hidden">
       <Button
         active={showChat}
-        disabled={!canHideChat}
+        disabled={!canHideChat || isSmallViewport} // expand button is disabled on mobile as it's needed
         onClick={() => {
           if (canHideChat) {
             chatStore.setKey('showChat', !showChat);
app/components/workbench/Workbench.client.tsx
CHANGED
@@ -16,6 +16,7 @@ import { cubicEasingFn } from '~/utils/easings';
 import { renderLogger } from '~/utils/logger';
 import { EditorPanel } from './EditorPanel';
 import { Preview } from './Preview';
+import useViewport from '~/lib/hooks';
 
 interface WorkspaceProps {
   chatStarted?: boolean;
@@ -65,6 +66,8 @@ export const Workbench = memo(({ chatStarted, isStreaming }: WorkspaceProps) =>
   const files = useStore(workbenchStore.files);
   const selectedView = useStore(workbenchStore.currentView);
 
+  const isSmallViewport = useViewport(1024);
+
   const setSelectedView = (view: WorkbenchViewType) => {
     workbenchStore.currentView.set(view);
   };
@@ -128,18 +131,20 @@ export const Workbench = memo(({ chatStarted, isStreaming }: WorkspaceProps) =>
      className={classNames(
        'fixed top-[calc(var(--header-height)+1.5rem)] bottom-6 w-[var(--workbench-inner-width)] mr-4 z-0 transition-[left,width] duration-200 bolt-ease-cubic-bezier',
        {
+          'w-full': isSmallViewport,
+          'left-0': showWorkbench && isSmallViewport,
          'left-[var(--workbench-left)]': showWorkbench,
          'left-[100%]': !showWorkbench,
        },
      )}
    >
-      <div className="absolute inset-0 px-6">
+      <div className="absolute inset-0 px-2 lg:px-6">
        <div className="h-full flex flex-col bg-bolt-elements-background-depth-2 border border-bolt-elements-borderColor shadow-sm rounded-lg overflow-hidden">
          <div className="flex items-center px-3 py-2 border-b border-bolt-elements-borderColor">
            <Slider selected={selectedView} options={sliderOptions} setSelected={setSelectedView} />
            <div className="ml-auto" />
            {selectedView === 'code' && (
-
+              <div className="flex overflow-y-auto">
              <PanelHeaderButton
                className="mr-1 text-sm"
                onClick={() => {
@@ -165,29 +170,32 @@ export const Workbench = memo(({ chatStarted, isStreaming }: WorkspaceProps) =>
              <PanelHeaderButton
                className="mr-1 text-sm"
                onClick={() => {
-                  const repoName = prompt(
+                  const repoName = prompt(
+                    'Please enter a name for your new GitHub repository:',
+                    'bolt-generated-project',
+                  );
                  if (!repoName) {
-                    alert(
+                    alert('Repository name is required. Push to GitHub cancelled.');
                    return;
                  }
-                  const githubUsername = prompt(
+                  const githubUsername = prompt('Please enter your GitHub username:');
                  if (!githubUsername) {
-                    alert(
+                    alert('GitHub username is required. Push to GitHub cancelled.');
                    return;
                  }
-                  const githubToken = prompt(
+                  const githubToken = prompt('Please enter your GitHub personal access token:');
                  if (!githubToken) {
-                    alert(
+                    alert('GitHub token is required. Push to GitHub cancelled.');
                    return;
                  }
+
+                  workbenchStore.pushToGitHub(repoName, githubUsername, githubToken);
                }}
              >
                <div className="i-ph:github-logo" />
                Push to GitHub
              </PanelHeaderButton>
+              </div>
            )}
            <IconButton
              icon="i-ph:x-circle"
app/lib/.server/llm/api-key.ts
CHANGED
@@ -35,6 +35,8 @@ export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Re
       return env.OPENAI_LIKE_API_KEY || cloudflareEnv.OPENAI_LIKE_API_KEY;
     case "xAI":
       return env.XAI_API_KEY || cloudflareEnv.XAI_API_KEY;
+    case "Cohere":
+      return env.COHERE_API_KEY;
     default:
       return "";
   }
app/lib/.server/llm/model.ts
CHANGED
@@ -7,6 +7,11 @@ import { createGoogleGenerativeAI } from '@ai-sdk/google';
 import { ollama } from 'ollama-ai-provider';
 import { createOpenRouter } from "@openrouter/ai-sdk-provider";
 import { createMistral } from '@ai-sdk/mistral';
+import { createCohere } from '@ai-sdk/cohere'
+
+export const DEFAULT_NUM_CTX = process.env.DEFAULT_NUM_CTX ?
+  parseInt(process.env.DEFAULT_NUM_CTX, 10) :
+  32768;
 
 export function getAnthropicModel(apiKey: string, model: string) {
   const anthropic = createAnthropic({
@@ -22,14 +27,16 @@ export function getOpenAILikeModel(baseURL: string, apiKey: string, model: strin
     baseURL,
     apiKey,
   });
+
+  return openai(model);
+}
+
+export function getCohereAIModel(apiKey:string, model: string){
+  const cohere = createCohere({
+    apiKey,
+  });
+
+  return cohere(model);
 }
 
 export function getOpenAIModel(apiKey: string, model: string) {
@@ -76,7 +83,7 @@ export function getHuggingFaceModel(apiKey: string, model: string) {
 
 export function getOllamaModel(baseURL: string, model: string) {
   let Ollama = ollama(model, {
-    numCtx:
+    numCtx: DEFAULT_NUM_CTX,
   });
 
   Ollama.config.baseURL = `${baseURL}/api`;
@@ -150,6 +157,8 @@ export function getModel(provider: string, model: string, env: Env, apiKeys?: Re
       return getLMStudioModel(baseURL, model);
     case 'xAI':
       return getXAIModel(apiKey, model);
+    case 'Cohere':
+      return getCohereAIModel(apiKey, model);
     default:
       return getOllamaModel(baseURL, model);
   }
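The new `getCohereAIModel` follows the same factory pattern as the other provider helpers in this file: build a provider instance bound to an API key, then select a model by name. A self-contained sketch of that pattern, assuming `@ai-sdk/cohere` is available (it is imported at the top of this file):

```ts
import { createCohere } from '@ai-sdk/cohere';

// Bind a provider instance to a key, then pick a model by name.
// 'command-r-plus' is one of the static models registered in app/utils/constants.ts.
const cohere = createCohere({ apiKey: process.env.COHERE_API_KEY ?? '' });
const model = cohere('command-r-plus');
```

`getModel('Cohere', ...)` then hands such an instance to `streamText` through the new `case 'Cohere'` branch above.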
app/lib/.server/llm/stream-text.ts
CHANGED
@@ -58,7 +58,6 @@ function extractPropertiesFromMessage(message: Message): { model: string; provid
 
   return { model, provider, content: cleanedContent };
 }
-
 export function streamText(
   messages: Messages,
   env: Env,
@@ -68,8 +67,6 @@ export function streamText(
   let currentModel = DEFAULT_MODEL;
   let currentProvider = DEFAULT_PROVIDER;
 
-  // console.log('StreamText:', JSON.stringify(messages));
-
   const processedMessages = messages.map((message) => {
     if (message.role === 'user') {
       const { model, provider, content } = extractPropertiesFromMessage(message);
@@ -83,25 +80,19 @@ export function streamText(
       return { ...message, content };
     }
 
-  };
-
-  // console.log('LLM Config:', llmConfig);
-
-  return _streamText(llmConfig);
-}
+  const modelDetails = MODEL_LIST.find((m) => m.name === currentModel);
+
+  const dynamicMaxTokens =
+    modelDetails && modelDetails.maxTokenAllowed
+      ? modelDetails.maxTokenAllowed
+      : MAX_TOKENS;
+
+  return _streamText({
+    model: getModel(currentProvider, currentModel, env, apiKeys),
+    system: getSystemPrompt(),
+    maxTokens: dynamicMaxTokens,
+    messages: convertToCoreMessages(processedMessages),
+    ...options,
+  });
+}
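The `dynamicMaxTokens` selection above is a plain lookup-with-fallback over `MODEL_LIST`. A self-contained sketch (the two entries and the 8000-token fallback are illustrative stand-ins for the real values in `app/utils/constants.ts`):

```ts
interface ModelInfo {
  name: string;
  maxTokenAllowed?: number;
}

const MAX_TOKENS = 8000; // stand-in for the project-wide default

const MODEL_LIST: ModelInfo[] = [
  { name: 'claude-3-5-sonnet-latest', maxTokenAllowed: 8000 },
  { name: 'command-r-plus', maxTokenAllowed: 4096 },
];

// Prefer the per-model cap when one is registered, else the global default.
function dynamicMaxTokens(currentModel: string): number {
  const modelDetails = MODEL_LIST.find((m) => m.name === currentModel);
  return modelDetails && modelDetails.maxTokenAllowed ? modelDetails.maxTokenAllowed : MAX_TOKENS;
}

console.log(dynamicMaxTokens('command-r-plus')); // 4096
console.log(dynamicMaxTokens('unknown-model')); // 8000
```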
app/lib/hooks/index.ts
CHANGED
@@ -2,3 +2,4 @@ export * from './useMessageParser';
 export * from './usePromptEnhancer';
 export * from './useShortcuts';
 export * from './useSnapScroll';
+export { default } from './useViewport';
app/lib/hooks/usePromptEnhancer.ts
CHANGED
@@ -1,4 +1,5 @@
 import { useState } from 'react';
+import type { ProviderInfo } from '~/types/model';
 import { createScopedLogger } from '~/utils/logger';
 
 const logger = createScopedLogger('usePromptEnhancement');
@@ -13,54 +14,54 @@ export function usePromptEnhancer() {
   };
 
   const enhancePrompt = async (
     input: string,
     setInput: (value: string) => void,
     model: string,
-    provider:
-    apiKeys?: Record<string, string
+    provider: ProviderInfo,
+    apiKeys?: Record<string, string>,
   ) => {
     setEnhancingPrompt(true);
     setPromptEnhanced(false);
 
     const requestBody: any = {
       message: input,
       model,
       provider,
     };
 
     if (apiKeys) {
       requestBody.apiKeys = apiKeys;
     }
 
     const response = await fetch('/api/enhancer', {
       method: 'POST',
       body: JSON.stringify(requestBody),
     });
 
     const reader = response.body?.getReader();
 
     const originalInput = input;
 
     if (reader) {
       const decoder = new TextDecoder();
 
       let _input = '';
       let _error;
 
       try {
         setInput('');
 
         while (true) {
           const { value, done } = await reader.read();
 
           if (done) {
             break;
           }
 
           _input += decoder.decode(value);
 
           logger.trace('Set input', _input);
 
           setInput(_input);
         }
       } catch (error) {
@@ -70,10 +71,10 @@ export function usePromptEnhancer() {
       if (_error) {
         logger.error(_error);
       }
 
       setEnhancingPrompt(false);
       setPromptEnhanced(true);
 
       setTimeout(() => {
         setInput(_input);
       });
app/lib/hooks/useViewport.ts
ADDED
@@ -0,0 +1,18 @@
+import { useState, useEffect } from 'react';
+
+const useViewport = (threshold = 1024) => {
+  const [isSmallViewport, setIsSmallViewport] = useState(window.innerWidth < threshold);
+
+  useEffect(() => {
+    const handleResize = () => setIsSmallViewport(window.innerWidth < threshold);
+    window.addEventListener('resize', handleResize);
+
+    return () => {
+      window.removeEventListener('resize', handleResize);
+    };
+  }, [threshold]);
+
+  return isSmallViewport;
+};
+
+export default useViewport;
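Usage of the new hook is a one-liner; a minimal sketch of a consumer (the 1024px breakpoint matches the call sites in `HeaderActionButtons` and `Workbench` above). Because the initial `useState` reads `window`, the hook assumes client-only rendering, which holds for the `.client.tsx` components that import it:

```tsx
import useViewport from '~/lib/hooks';

// Switch between stacked and side-by-side layouts at the lg breakpoint,
// the same way the components in this merge do.
function ResponsiveLayout() {
  const isSmallViewport = useViewport(1024);

  return <div>{isSmallViewport ? 'stacked layout' : 'side-by-side layout'}</div>;
}

export default ResponsiveLayout;
```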
app/routes/api.enhancer.ts
CHANGED
@@ -2,7 +2,7 @@ import { type ActionFunctionArgs } from '@remix-run/cloudflare';
 import { StreamingTextResponse, parseStreamPart } from 'ai';
 import { streamText } from '~/lib/.server/llm/stream-text';
 import { stripIndents } from '~/utils/stripIndent';
-import type {
+import type { ProviderInfo } from '~/types/model';
 
 const encoder = new TextEncoder();
 const decoder = new TextDecoder();
@@ -12,25 +12,27 @@ export async function action(args: ActionFunctionArgs) {
 }
 
 async function enhancerAction({ context, request }: ActionFunctionArgs) {
   const { message, model, provider, apiKeys } = await request.json<{
     message: string;
     model: string;
-    provider:
+    provider: ProviderInfo;
     apiKeys?: Record<string, string>;
   }>();
 
+  const { name: providerName } = provider;
+
+  // validate 'model' and 'provider' fields
   if (!model || typeof model !== 'string') {
     throw new Response('Invalid or missing model', {
       status: 400,
-      statusText: 'Bad Request'
+      statusText: 'Bad Request',
     });
   }
 
-  if (!
+  if (!providerName || typeof providerName !== 'string') {
     throw new Response('Invalid or missing provider', {
       status: 400,
-      statusText: 'Bad Request'
+      statusText: 'Bad Request',
     });
   }
 
@@ -39,7 +41,9 @@ async function enhancerAction({ context, request }: ActionFunctionArgs) {
   [
     {
       role: 'user',
-      content:
+      content:
+        `[Model: ${model}]\n\n[Provider: ${providerName}]\n\n` +
+        stripIndents`
       I want you to improve the user prompt that is wrapped in \`<original_prompt>\` tags.
 
       IMPORTANT: Only respond with the improved prompt and nothing else!
@@ -52,23 +56,24 @@ async function enhancerAction({ context, request }: ActionFunctionArgs) {
     ],
     context.cloudflare.env,
     undefined,
-    apiKeys
+    apiKeys,
   );
 
   const transformStream = new TransformStream({
     transform(chunk, controller) {
       const text = decoder.decode(chunk);
-      const lines = text.split('\n').filter(line => line.trim() !== '');
+      const lines = text.split('\n').filter((line) => line.trim() !== '');
 
       for (const line of lines) {
         try {
           const parsed = parseStreamPart(line);
+
           if (parsed.type === 'text') {
             controller.enqueue(encoder.encode(parsed.value));
           }
         } catch (e) {
-          //
-          console.warn('Failed to parse stream part:', line);
+          // skip invalid JSON lines
+          console.warn('Failed to parse stream part:', line, e);
         }
       }
     },
@@ -83,7 +88,7 @@ async function enhancerAction({ context, request }: ActionFunctionArgs) {
   if (error instanceof Error && error.message?.includes('API key')) {
     throw new Response('Invalid or missing API key', {
       status: 401,
-      statusText: 'Unauthorized'
+      statusText: 'Unauthorized',
     });
   }
 
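The `TransformStream` above filters an AI SDK data stream down to plain text before it is re-wrapped in a `StreamingTextResponse`. A reduced sketch of the same parsing step (the `0:"..."` line format is the AI SDK v3 stream protocol that `parseStreamPart` expects; the sample input is hard-coded for illustration):

```ts
import { parseStreamPart } from 'ai';

const decoder = new TextDecoder();
const encoder = new TextEncoder();

// Decode a chunk, split it into protocol lines, and keep only text parts
// (stream code 0), exactly as the route's transform does.
function extractText(chunk: Uint8Array): Uint8Array[] {
  const text = decoder.decode(chunk);
  const lines = text.split('\n').filter((line) => line.trim() !== '');
  const out: Uint8Array[] = [];

  for (const line of lines) {
    try {
      const parsed = parseStreamPart(line);

      if (parsed.type === 'text') {
        out.push(encoder.encode(parsed.value));
      }
    } catch {
      // skip lines that are not valid stream parts
    }
  }

  return out;
}

console.log(extractText(encoder.encode('0:"Hello"\n0:" world"\n')).length); // 2
```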
app/utils/constants.ts
CHANGED
@@ -12,12 +12,12 @@ const PROVIDER_LIST: ProviderInfo[] = [
|
|
12 |
{
|
13 |
name: 'Anthropic',
|
14 |
staticModels: [
|
15 |
-
{ name: 'claude-3-5-sonnet-latest', label: 'Claude 3.5 Sonnet (new)', provider: 'Anthropic' },
|
16 |
-
{ name: 'claude-3-5-sonnet-20240620', label: 'Claude 3.5 Sonnet (old)', provider: 'Anthropic' },
|
17 |
-
{ name: 'claude-3-5-haiku-latest', label: 'Claude 3.5 Haiku (new)', provider: 'Anthropic' },
|
18 |
-
{ name: 'claude-3-opus-latest', label: 'Claude 3 Opus', provider: 'Anthropic' },
|
19 |
-
{ name: 'claude-3-sonnet-20240229', label: 'Claude 3 Sonnet', provider: 'Anthropic' },
|
20 |
-
{ name: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku', provider: 'Anthropic' }
|
21 |
],
|
22 |
getApiKeyLink: "https://console.anthropic.com/settings/keys",
|
23 |
},
|
@@ -36,23 +36,40 @@ const PROVIDER_LIST: ProviderInfo[] = [
|
|
36 |
],
|
37 |
getDynamicModels: getOpenAILikeModels
|
38 |
},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
39 |
{
|
40 |
name: 'OpenRouter',
|
41 |
staticModels: [
|
42 |
-
{ name: 'gpt-4o', label: 'GPT-4o', provider: '
|
43 |
{
|
44 |
name: 'anthropic/claude-3.5-sonnet',
|
45 |
label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',
|
46 |
provider: 'OpenRouter'
|
|
|
47 |
},
|
48 |
-
{ name: 'anthropic/claude-3-haiku', label: 'Anthropic: Claude 3 Haiku (OpenRouter)', provider: 'OpenRouter' },
|
49 |
-
{ name: 'deepseek/deepseek-coder', label: 'Deepseek-Coder V2 236B (OpenRouter)', provider: 'OpenRouter' },
|
50 |
-
{ name: 'google/gemini-flash-1.5', label: 'Google Gemini Flash 1.5 (OpenRouter)', provider: 'OpenRouter' },
|
51 |
-
{ name: 'google/gemini-pro-1.5', label: 'Google Gemini Pro 1.5 (OpenRouter)', provider: 'OpenRouter' },
|
52 |
-
{ name: 'x-ai/grok-beta', label: 'xAI Grok Beta (OpenRouter)', provider: 'OpenRouter' },
|
53 |
-
{ name: 'mistralai/mistral-nemo', label: 'OpenRouter Mistral Nemo (OpenRouter)', provider: 'OpenRouter' },
|
54 |
-
{ name: 'qwen/qwen-110b-chat', label: 'OpenRouter Qwen 110b Chat (OpenRouter)', provider: 'OpenRouter' },
|
55 |
-
{ name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter' }
|
56 |
],
|
57 |
getDynamicModels: getOpenRouterModels,
|
58 |
getApiKeyLink: 'https://openrouter.ai/settings/keys',
|
@@ -69,59 +86,59 @@ const PROVIDER_LIST: ProviderInfo[] = [
|
|
69 |
}, {
|
70 |
name: 'Groq',
|
71 |
staticModels: [
|
72 |
-
{ name: 'llama-3.1-70b-versatile', label: 'Llama 3.1 70b (Groq)', provider: 'Groq' },
|
73 |
-
{ name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq' },
|
74 |
-
{ name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq' },
|
75 |
-
{ name: 'llama-3.2-3b-preview', label: 'Llama 3.2 3b (Groq)', provider: 'Groq' },
|
76 |
-
{ name: 'llama-3.2-1b-preview', label: 'Llama 3.2 1b (Groq)', provider: 'Groq' }
|
77 |
],
|
78 |
getApiKeyLink: 'https://console.groq.com/keys'
|
79 |
},
|
80 |
{
|
81 |
name: 'HuggingFace',
|
82 |
staticModels: [
|
83 |
-
{ name: 'Qwen/Qwen2.5-Coder-32B-Instruct', label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)', provider: 'HuggingFace' },
|
84 |
-
{ name: '01-ai/Yi-1.5-34B-Chat', label: 'Yi-1.5-34B-Chat (HuggingFace)', provider: 'HuggingFace' },
|
85 |
-
{ name: 'codellama/CodeLlama-34b-Instruct-hf', label: 'CodeLlama-34b-Instruct (HuggingFace)', provider: 'HuggingFace' },
|
86 |
-
{ name: 'NousResearch/Hermes-3-Llama-3.1-8B', label: 'Hermes-3-Llama-3.1-8B (HuggingFace)', provider: 'HuggingFace' }
|
87 |
],
|
88 |
getApiKeyLink: 'https://huggingface.co/settings/tokens'
|
89 |
},
|
90 |
-
|
91 |
{
|
92 |
name: 'OpenAI',
|
93 |
staticModels: [
|
94 |
-
{ name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI' },
|
95 |
-
{ name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI' },
|
96 |
-
{ name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI' },
|
97 |
-
{ name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI' }
|
98 |
],
|
99 |
getApiKeyLink: "https://platform.openai.com/api-keys",
|
100 |
}, {
|
101 |
name: 'xAI',
|
102 |
staticModels: [
|
103 |
-
{ name: 'grok-beta', label: 'xAI Grok Beta', provider: 'xAI' }
|
104 |
],
|
105 |
getApiKeyLink: 'https://docs.x.ai/docs/quickstart#creating-an-api-key'
|
106 |
}, {
|
107 |
name: 'Deepseek',
|
108 |
staticModels: [
|
109 |
-
{ name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek' },
|
110 |
-
{ name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek' }
|
111 |
],
|
112 |
getApiKeyLink: 'https://platform.deepseek.com/api_keys'
|
113 |
}, {
|
114 |
name: 'Mistral',
|
115 |
staticModels: [
|
116 |
-
{ name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral' },
|
117 |
-
{ name: 'open-mixtral-8x7b', label: 'Mistral 8x7B', provider: 'Mistral' },
|
118 |
-
{ name: 'open-mixtral-8x22b', label: 'Mistral 8x22B', provider: 'Mistral' },
|
119 |
-
{ name: 'open-codestral-mamba', label: 'Codestral Mamba', provider: 'Mistral' },
|
120 |
-
{ name: 'open-mistral-nemo', label: 'Mistral Nemo', provider: 'Mistral' },
|
121 |
-
{ name: 'ministral-8b-latest', label: 'Mistral 8B', provider: 'Mistral' },
|
122 |
-
{ name: 'mistral-small-latest', label: 'Mistral Small', provider: 'Mistral' },
|
123 |
-
{ name: 'codestral-latest', label: 'Codestral', provider: 'Mistral' },
|
124 |
-
{ name: 'mistral-large-latest', label: 'Mistral Large Latest', provider: 'Mistral' }
|
125 |
],
|
126 |
getApiKeyLink: 'https://console.mistral.ai/api-keys/'
|
127 |
}, {
|
@@ -165,7 +182,8 @@ async function getOllamaModels(): Promise<ModelInfo[]> {
|
|
165 |
return data.models.map((model: OllamaModel) => ({
|
166 |
name: model.name,
|
167 |
label: `${model.name} (${model.details.parameter_size})`,
|
168 |
-
provider: 'Ollama'
|
|
|
169 |
}));
|
170 |
} catch (e) {
|
171 |
return [];
|
@@ -218,8 +236,9 @@ async function getOpenRouterModels(): Promise<ModelInfo[]> {
|
|
218 |
name: m.id,
|
219 |
label: `${m.name} - in:$${(m.pricing.prompt * 1_000_000).toFixed(
|
220 |
2)} out:$${(m.pricing.completion * 1_000_000).toFixed(2)} - context ${Math.floor(
|
221 |
-
|
222 |
-
provider: 'OpenRouter'
|
|
|
223 |
}));
|
224 |
}
|
225 |
|
|
|
12 |
{
|
13 |
name: 'Anthropic',
|
14 |
staticModels: [
|
15 |
+
{ name: 'claude-3-5-sonnet-latest', label: 'Claude 3.5 Sonnet (new)', provider: 'Anthropic', maxTokenAllowed: 8000 },
|
16 |
+
{ name: 'claude-3-5-sonnet-20240620', label: 'Claude 3.5 Sonnet (old)', provider: 'Anthropic', maxTokenAllowed: 8000 },
|
17 |
+
{ name: 'claude-3-5-haiku-latest', label: 'Claude 3.5 Haiku (new)', provider: 'Anthropic', maxTokenAllowed: 8000 },
|
18 |
+
{ name: 'claude-3-opus-latest', label: 'Claude 3 Opus', provider: 'Anthropic', maxTokenAllowed: 8000 },
|
19 |
+
{ name: 'claude-3-sonnet-20240229', label: 'Claude 3 Sonnet', provider: 'Anthropic', maxTokenAllowed: 8000 },
|
20 |
+
{ name: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku', provider: 'Anthropic', maxTokenAllowed: 8000 }
|
21 |
],
|
22 |
getApiKeyLink: "https://console.anthropic.com/settings/keys",
|
23 |
},
|
|
|
36 |      ],
37 |      getDynamicModels: getOpenAILikeModels
38 |    },
39 | +  {
40 | +    name: 'Cohere',
41 | +    staticModels: [
42 | +      { name: 'command-r-plus-08-2024', label: 'Command R plus Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
43 | +      { name: 'command-r-08-2024', label: 'Command R Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
44 | +      { name: 'command-r-plus', label: 'Command R plus', provider: 'Cohere', maxTokenAllowed: 4096 },
45 | +      { name: 'command-r', label: 'Command R', provider: 'Cohere', maxTokenAllowed: 4096 },
46 | +      { name: 'command', label: 'Command', provider: 'Cohere', maxTokenAllowed: 4096 },
47 | +      { name: 'command-nightly', label: 'Command Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
48 | +      { name: 'command-light', label: 'Command Light', provider: 'Cohere', maxTokenAllowed: 4096 },
49 | +      { name: 'command-light-nightly', label: 'Command Light Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
50 | +      { name: 'c4ai-aya-expanse-8b', label: 'c4AI Aya Expanse 8b', provider: 'Cohere', maxTokenAllowed: 4096 },
51 | +      { name: 'c4ai-aya-expanse-32b', label: 'c4AI Aya Expanse 32b', provider: 'Cohere', maxTokenAllowed: 4096 },
52 | +    ],
53 | +    getApiKeyLink: 'https://dashboard.cohere.com/api-keys'
54 | +  },
55 |    {
56 |      name: 'OpenRouter',
57 |      staticModels: [
58 | +      { name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
59 |        {
60 |          name: 'anthropic/claude-3.5-sonnet',
61 |          label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',
62 |          provider: 'OpenRouter'
63 | +        , maxTokenAllowed: 8000
64 |        },
65 | +      { name: 'anthropic/claude-3-haiku', label: 'Anthropic: Claude 3 Haiku (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
66 | +      { name: 'deepseek/deepseek-coder', label: 'Deepseek-Coder V2 236B (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
67 | +      { name: 'google/gemini-flash-1.5', label: 'Google Gemini Flash 1.5 (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
68 | +      { name: 'google/gemini-pro-1.5', label: 'Google Gemini Pro 1.5 (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
69 | +      { name: 'x-ai/grok-beta', label: 'xAI Grok Beta (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
70 | +      { name: 'mistralai/mistral-nemo', label: 'OpenRouter Mistral Nemo (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
71 | +      { name: 'qwen/qwen-110b-chat', label: 'OpenRouter Qwen 110b Chat (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
72 | +      { name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 4096 }
73 |      ],
74 |      getDynamicModels: getOpenRouterModels,
75 |      getApiKeyLink: 'https://openrouter.ai/settings/keys',
86 |    }, {
87 |      name: 'Groq',
88 |      staticModels: [
89 | +      { name: 'llama-3.1-70b-versatile', label: 'Llama 3.1 70b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
90 | +      { name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
91 | +      { name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
92 | +      { name: 'llama-3.2-3b-preview', label: 'Llama 3.2 3b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
93 | +      { name: 'llama-3.2-1b-preview', label: 'Llama 3.2 1b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 }
94 |      ],
95 |      getApiKeyLink: 'https://console.groq.com/keys'
96 |    },
97 |    {
98 |      name: 'HuggingFace',
99 |      staticModels: [
100 | +      { name: 'Qwen/Qwen2.5-Coder-32B-Instruct', label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
101 | +      { name: '01-ai/Yi-1.5-34B-Chat', label: 'Yi-1.5-34B-Chat (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
102 | +      { name: 'codellama/CodeLlama-34b-Instruct-hf', label: 'CodeLlama-34b-Instruct (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 },
103 | +      { name: 'NousResearch/Hermes-3-Llama-3.1-8B', label: 'Hermes-3-Llama-3.1-8B (HuggingFace)', provider: 'HuggingFace', maxTokenAllowed: 8000 }
104 |      ],
105 |      getApiKeyLink: 'https://huggingface.co/settings/tokens'
106 |    },
107 | +
108 |    {
109 |      name: 'OpenAI',
110 |      staticModels: [
111 | +      { name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
112 | +      { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
113 | +      { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },
114 | +      { name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 }
115 |      ],
116 |      getApiKeyLink: "https://platform.openai.com/api-keys",
117 |    }, {
118 |      name: 'xAI',
119 |      staticModels: [
120 | +      { name: 'grok-beta', label: 'xAI Grok Beta', provider: 'xAI', maxTokenAllowed: 8000 }
121 |      ],
122 |      getApiKeyLink: 'https://docs.x.ai/docs/quickstart#creating-an-api-key'
123 |    }, {
124 |      name: 'Deepseek',
125 |      staticModels: [
126 | +      { name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek', maxTokenAllowed: 8000 },
127 | +      { name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek', maxTokenAllowed: 8000 }
128 |      ],
129 |      getApiKeyLink: 'https://platform.deepseek.com/api_keys'
130 |    }, {
131 |      name: 'Mistral',
132 |      staticModels: [
133 | +      { name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral', maxTokenAllowed: 8000 },
134 | +      { name: 'open-mixtral-8x7b', label: 'Mistral 8x7B', provider: 'Mistral', maxTokenAllowed: 8000 },
135 | +      { name: 'open-mixtral-8x22b', label: 'Mistral 8x22B', provider: 'Mistral', maxTokenAllowed: 8000 },
136 | +      { name: 'open-codestral-mamba', label: 'Codestral Mamba', provider: 'Mistral', maxTokenAllowed: 8000 },
137 | +      { name: 'open-mistral-nemo', label: 'Mistral Nemo', provider: 'Mistral', maxTokenAllowed: 8000 },
138 | +      { name: 'ministral-8b-latest', label: 'Mistral 8B', provider: 'Mistral', maxTokenAllowed: 8000 },
139 | +      { name: 'mistral-small-latest', label: 'Mistral Small', provider: 'Mistral', maxTokenAllowed: 8000 },
140 | +      { name: 'codestral-latest', label: 'Codestral', provider: 'Mistral', maxTokenAllowed: 8000 },
141 | +      { name: 'mistral-large-latest', label: 'Mistral Large Latest', provider: 'Mistral', maxTokenAllowed: 8000 }
142 |      ],
143 |      getApiKeyLink: 'https://console.mistral.ai/api-keys/'
144 |    }, {
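Every entry in the registry above follows the `ProviderInfo` shape from `app/utils/types.ts`. Below is a minimal stand-alone sketch of one entry as this diff shapes it; the interface bodies are inferred from usage in `constants.ts`, and the optionality of `getDynamicModels` and `getApiKeyLink` is an assumption:

```ts
// Interfaces mirroring app/utils/types.ts; the optional markers are assumptions.
interface ModelInfo {
  name: string;
  label: string;
  provider: string;
  maxTokenAllowed: number;
}

interface ProviderInfo {
  name: string;
  staticModels: ModelInfo[];
  getDynamicModels?: () => Promise<ModelInfo[]>;
  getApiKeyLink?: string;
}

// One registry entry, shaped like the Cohere block added above.
const cohereProvider: ProviderInfo = {
  name: 'Cohere',
  staticModels: [
    { name: 'command-r-plus-08-2024', label: 'Command R plus Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
  ],
  getApiKeyLink: 'https://dashboard.cohere.com/api-keys',
};
```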
182 |      return data.models.map((model: OllamaModel) => ({
183 |        name: model.name,
184 |        label: `${model.name} (${model.details.parameter_size})`,
185 | +      provider: 'Ollama',
186 | +      maxTokenAllowed:8000,
187 |      }));
188 |    } catch (e) {
189 |      return [];
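The `getOllamaModels` change tags each locally discovered model with a provider name and a default token cap. For orientation, here is a self-contained sketch of that fetcher pattern. The `/api/tags` endpoint and the `{ models: [...] }` response shape match Ollama's public REST API; the hard-coded base URL is an assumption for illustration (the app reads it from configuration):

```ts
interface OllamaModel {
  name: string;
  details: { parameter_size: string };
}

interface ModelInfo {
  name: string;
  label: string;
  provider: string;
  maxTokenAllowed: number;
}

// Minimal sketch of a dynamic-model fetcher against Ollama's GET /api/tags.
async function getOllamaModels(baseUrl = 'http://127.0.0.1:11434'): Promise<ModelInfo[]> {
  try {
    const response = await fetch(`${baseUrl}/api/tags`);
    const data = (await response.json()) as { models: OllamaModel[] };

    return data.models.map((model) => ({
      name: model.name,
      label: `${model.name} (${model.details.parameter_size})`,
      provider: 'Ollama',
      maxTokenAllowed: 8000, // same default cap the diff applies
    }));
  } catch {
    // An unreachable Ollama server simply contributes no models.
    return [];
  }
}
```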
236 |      name: m.id,
237 |      label: `${m.name} - in:$${(m.pricing.prompt * 1_000_000).toFixed(
238 |        2)} out:$${(m.pricing.completion * 1_000_000).toFixed(2)} - context ${Math.floor(
239 | +      m.context_length / 1000)}k`,
240 | +    provider: 'OpenRouter',
241 | +    maxTokenAllowed:8000,
242 |    }));
243 |  }
244 |
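A note on the label arithmetic above: OpenRouter's models endpoint reports `pricing.prompt` and `pricing.completion` in dollars per token, so multiplying by `1_000_000` converts them to dollars per million tokens. For example, a prompt price of `0.000003` renders as `in:$3.00`, and a `context_length` of `128000` becomes `context 128k` via `Math.floor(128000 / 1000)`. The hard-coded `maxTokenAllowed: 8000` applies the same conservative completion cap used for the static model entries.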
app/utils/types.ts
CHANGED
@@ -25,6 +25,7 @@ export interface ModelInfo {
25 |    name: string;
26 |    label: string;
27 |    provider: string;
28 | +  maxTokenAllowed: number;
29 |  }
30 |
31 |  export interface ProviderInfo {
docker-compose.yaml
CHANGED
@@ -21,6 +21,7 @@ services:
21 |        - GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY}
22 |        - OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL}
23 |        - VITE_LOG_LEVEL=${VITE_LOG_LEVEL:-debug}
24 | +      - DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX:-32768}
25 |        - RUNNING_IN_DOCKER=true
26 |      extra_hosts:
27 |        - "host.docker.internal:host-gateway"

@@ -48,6 +49,7 @@ services:
49 |        - GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY}
50 |        - OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL}
51 |        - VITE_LOG_LEVEL=${VITE_LOG_LEVEL:-debug}
52 | +      - DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX:-32768}
53 |        - RUNNING_IN_DOCKER=true
54 |      extra_hosts:
55 |        - "host.docker.internal:host-gateway"
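Both services now default `DEFAULT_NUM_CTX` to 32768 when the variable is unset. A minimal sketch of the corresponding server-side read (the parsing code is an assumption for illustration; only the variable name and the 32768 fallback come from this compose change):

```ts
// Sketch: resolve the context-window size from the environment,
// mirroring the 32768 fallback used in docker-compose.yaml.
const DEFAULT_NUM_CTX: number = process.env.DEFAULT_NUM_CTX
  ? parseInt(process.env.DEFAULT_NUM_CTX, 10)
  : 32768;
```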
package.json
CHANGED
@@ -27,6 +27,7 @@
27 |    },
28 |    "dependencies": {
29 |      "@ai-sdk/anthropic": "^0.0.39",
30 | +    "@ai-sdk/cohere": "^1.0.1",
31 |      "@ai-sdk/google": "^0.0.52",
32 |      "@ai-sdk/mistral": "^0.0.43",
33 |      "@ai-sdk/openai": "^0.0.66",
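The new `@ai-sdk/cohere` dependency provides a first-party Vercel AI SDK provider for the Cohere models registered in `app/utils/constants.ts`. A minimal usage sketch (`createCohere` is the package's documented factory; wiring it into the app's model factory is assumed here, not shown in this diff):

```ts
import { createCohere } from '@ai-sdk/cohere';

// Build a provider instance keyed by the COHERE_API_KEY from .env.local,
// then select one of the models registered in constants.ts.
const cohere = createCohere({ apiKey: process.env.COHERE_API_KEY });
const model = cohere('command-r-plus-08-2024');
```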
pnpm-lock.yaml
CHANGED
@@ -14,6 +14,9 @@ importers:
14 |        '@ai-sdk/anthropic':
15 |          specifier: ^0.0.39
16 |          version: 0.0.39([email protected])
17 | +      '@ai-sdk/cohere':
18 | +        specifier: ^1.0.1
19 | +        version: 1.0.1([email protected])
20 |        '@ai-sdk/google':
21 |          specifier: ^0.0.52
22 |          version: 0.0.52([email protected])

@@ -279,6 +282,12 @@ packages:
282 |      peerDependencies:
283 |        zod: ^3.0.0
284 |
285 | +  '@ai-sdk/[email protected]':
286 | +    resolution: {integrity: sha512-xLaSYl/hs9EqfpvT9PvqZrDWjJPQPZBd0iT32T6812vN6kwuEQ6sSgQvqHWczIqxeej2GNRgMQwDL6Lh0L5pZw==}
287 | +    engines: {node: '>=18'}
288 | +    peerDependencies:
289 | +      zod: ^3.0.0
290 | +
291 |    '@ai-sdk/[email protected]':
292 |      resolution: {integrity: sha512-bfsA/1Ae0SQ6NfLwWKs5SU4MBwlzJjVhK6bTVBicYFjUxg9liK/W76P1Tq/qK9OlrODACz3i1STOIWsFPpIOuQ==}
293 |      engines: {node: '>=18'}

@@ -324,6 +333,15 @@
333 |        zod:
334 |          optional: true
335 |
336 | +  '@ai-sdk/[email protected]':
337 | +    resolution: {integrity: sha512-TNg7rPhRtETB2Z9F0JpOvpGii9Fs8EWM8nYy1jEkvSXkrPJ6b/9zVnDdaJsmLFDyrMbOsPJlkblYtmYEQou36w==}
338 | +    engines: {node: '>=18'}
339 | +    peerDependencies:
340 | +      zod: ^3.0.0
341 | +    peerDependenciesMeta:
342 | +      zod:
343 | +        optional: true
344 | +
345 |    '@ai-sdk/[email protected]':
346 |      resolution: {integrity: sha512-oOwPQD8i2Ynpn22cur4sk26FW3mSy6t6/X/K1Ay2yGBKYiSpRyLfObhOrZEGsXDx+3euKy4nEZ193R36NM+tpQ==}
347 |      engines: {node: '>=18'}

@@ -336,6 +354,10 @@
354 |      resolution: {integrity: sha512-XMsNGJdGO+L0cxhhegtqZ8+T6nn4EoShS819OvCgI2kLbYTIvk0GWFGD0AXJmxkxs3DrpsJxKAFukFR7bvTkgQ==}
355 |      engines: {node: '>=18'}
356 |
357 | +  '@ai-sdk/[email protected]':
358 | +    resolution: {integrity: sha512-Sj29AzooJ7SYvhPd+AAWt/E7j63E9+AzRnoMHUaJPRYzOd/WDrVNxxv85prF9gDcQ7XPVlSk9j6oAZV9/DXYpA==}
359 | +    engines: {node: '>=18'}
360 | +
361 |    '@ai-sdk/[email protected]':
362 |      resolution: {integrity: sha512-1asDpxgmeHWL0/EZPCLENxfOHT+0jce0z/zasRhascodm2S6f6/KZn5doLG9jdmarcb+GjMjFmmwyOVXz3W1xg==}
363 |      engines: {node: '>=18'}

@@ -3033,6 +3055,10 @@
3055 |      resolution: {integrity: sha512-v0eOBUbiaFojBu2s2NPBfYUoRR9GjcDNvCXVaqEf5vVfpIAh9f8RCo4vXTP8c63QRKCFwoLpMpTdPwwhEKVgzA==}
3056 |      engines: {node: '>=14.18'}
3057 |
3058 | +  [email protected]:
3059 | +    resolution: {integrity: sha512-T1C0XCUimhxVQzW4zFipdx0SficT651NnkR0ZSH3yQwh+mFMdLfgjABVi4YtMTtaL4s168593DaoaRLMqryavA==}
3060 | +    engines: {node: '>=18.0.0'}
3061 | +
3062 |
3063 |      resolution: {integrity: sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==}
3064 |

@@ -5687,6 +5713,12 @@ snapshots:
5713 |      '@ai-sdk/provider-utils': 1.0.9([email protected])
5714 |      zod: 3.23.8
5715 |
5716 | +  '@ai-sdk/[email protected]([email protected])':
5717 | +    dependencies:
5718 | +      '@ai-sdk/provider': 1.0.0
5719 | +      '@ai-sdk/provider-utils': 2.0.1([email protected])
5720 | +      zod: 3.23.8
5721 | +
5722 |    '@ai-sdk/[email protected]([email protected])':
5723 |      dependencies:
5724 |        '@ai-sdk/provider': 0.0.24

@@ -5733,6 +5765,15 @@
5765 |      optionalDependencies:
5766 |        zod: 3.23.8
5767 |
5768 | +  '@ai-sdk/[email protected]([email protected])':
5769 | +    dependencies:
5770 | +      '@ai-sdk/provider': 1.0.0
5771 | +      eventsource-parser: 3.0.0
5772 | +      nanoid: 3.3.7
5773 | +      secure-json-parse: 2.7.0
5774 | +    optionalDependencies:
5775 | +      zod: 3.23.8
5776 | +
5777 |    '@ai-sdk/[email protected]':
5778 |      dependencies:
5779 |        json-schema: 0.4.0

@@ -5745,6 +5786,10 @@
5786 |      dependencies:
5787 |        json-schema: 0.4.0
5788 |
5789 | +  '@ai-sdk/[email protected]':
5790 | +    dependencies:
5791 | +      json-schema: 0.4.0
5792 | +
5793 |    '@ai-sdk/[email protected]([email protected])([email protected])':
5794 |      dependencies:
5795 |        '@ai-sdk/provider-utils': 1.0.20([email protected])

@@ -8751,6 +8796,8 @@ snapshots:
8796 |
8797 |
8798 |
8799 | +  [email protected]: {}
8800 | +
8801 |
8802 |      dependencies:
8803 |        md5.js: 1.3.5