Oliver Jägle committed on
Commit 4589014 · unverified · 2 parents: 5bd4541 753f0b3

Merge remote-tracking branch 'upstream/main' into linting

app/lib/.server/llm/model.ts CHANGED
@@ -10,9 +10,12 @@ import { ollama } from 'ollama-ai-provider';
 import { createOpenRouter } from '@openrouter/ai-sdk-provider';
 import { createMistral } from '@ai-sdk/mistral';
 import { createCohere } from '@ai-sdk/cohere';
+import type { LanguageModelV1 } from 'ai';
 
 export const DEFAULT_NUM_CTX = process.env.DEFAULT_NUM_CTX ? parseInt(process.env.DEFAULT_NUM_CTX, 10) : 32768;
 
+type OptionalApiKey = string | undefined;
+
 export function getAnthropicModel(apiKey: OptionalApiKey, model: string) {
   const anthropic = createAnthropic({
     apiKey,
@@ -20,9 +23,6 @@ export function getAnthropicModel(apiKey: OptionalApiKey, model: string) {
 
   return anthropic(model);
 }
-
-type OptionalApiKey = string | undefined;
-
 export function getOpenAILikeModel(baseURL: string, apiKey: OptionalApiKey, model: string) {
   const openai = createOpenAI({
     baseURL,
@@ -85,7 +85,7 @@ export function getHuggingFaceModel(apiKey: OptionalApiKey, model: string) {
 export function getOllamaModel(baseURL: string, model: string) {
   const ollamaInstance = ollama(model, {
     numCtx: DEFAULT_NUM_CTX,
-  });
+  }) as LanguageModelV1 & { config: any };
 
   ollamaInstance.config.baseURL = `${baseURL}/api`;
 
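Reassembled from the hunks above, the Ollama factory now reads as in the sketch below. The cast matters because the generic LanguageModelV1 interface from 'ai' does not expose the provider's internal config object, so widening the type is what lets the baseURL mutation type-check (proper typings remain a TODO, per the suppression headers in the next two files). The trailing return is an assumption, since it falls outside the diff context shown.

// Sketch of getOllamaModel after this merge, reassembled from the hunks above.
import type { LanguageModelV1 } from 'ai';
import { ollama } from 'ollama-ai-provider';

export const DEFAULT_NUM_CTX = process.env.DEFAULT_NUM_CTX ? parseInt(process.env.DEFAULT_NUM_CTX, 10) : 32768;

export function getOllamaModel(baseURL: string, model: string) {
  // ollama() returns a provider-specific type without a public `config`
  // property; widening to `LanguageModelV1 & { config: any }` permits the
  // mutation below without per-provider typings.
  const ollamaInstance = ollama(model, {
    numCtx: DEFAULT_NUM_CTX,
  }) as LanguageModelV1 & { config: any };

  // Point the instance at the caller-supplied Ollama server.
  ollamaInstance.config.baseURL = `${baseURL}/api`;

  return ollamaInstance; // assumed: not visible in the hunk
}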
app/lib/.server/llm/stream-text.ts CHANGED
@@ -1,7 +1,6 @@
-/*
- * @ts-nocheck
- * Preventing TS checks with files presented in the video for a better presentation.
- */
+// eslint-disable-next-line @typescript-eslint/ban-ts-comment
+// @ts-nocheck – TODO: Provider proper types
+
 import { streamText as _streamText, convertToCoreMessages } from 'ai';
 import { getModel } from '~/lib/.server/llm/model';
 import { MAX_TOKENS } from './constants';
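The same two-line header replaces the old block comment in app/routes/api.chat.ts below. The point of the change: @typescript-eslint/ban-ts-comment flags @ts-nocheck directives, so each suppression now carries an explicit eslint-disable-next-line plus a TODO, keeping the remaining type debt visible to the linter instead of buried in a block comment.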
app/routes/api.chat.ts CHANGED
@@ -1,7 +1,6 @@
-/*
- * @ts-nocheck
- * Preventing TS checks with files presented in the video for a better presentation.
- */
+// eslint-disable-next-line @typescript-eslint/ban-ts-comment
+// @ts-nocheck – TODO: Provider proper types
+
 import { type ActionFunctionArgs } from '@remix-run/cloudflare';
 import { MAX_RESPONSE_SEGMENTS, MAX_TOKENS } from '~/lib/.server/llm/constants';
 import { CONTINUE_PROMPT } from '~/lib/.server/llm/prompts';
app/utils/constants.ts CHANGED
@@ -167,6 +167,48 @@ const PROVIDER_LIST: ProviderInfo[] = [
       provider: 'HuggingFace',
       maxTokenAllowed: 8000,
     },
+    {
+      name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
+      label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'Qwen/Qwen2.5-72B-Instruct',
+      label: 'Qwen2.5-72B-Instruct (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'meta-llama/Llama-3.1-70B-Instruct',
+      label: 'Llama-3.1-70B-Instruct (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'meta-llama/Llama-3.1-405B',
+      label: 'Llama-3.1-405B (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: '01-ai/Yi-1.5-34B-Chat',
+      label: 'Yi-1.5-34B-Chat (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'codellama/CodeLlama-34b-Instruct-hf',
+      label: 'CodeLlama-34b-Instruct (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
+    {
+      name: 'NousResearch/Hermes-3-Llama-3.1-8B',
+      label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
+      provider: 'HuggingFace',
+      maxTokenAllowed: 8000,
+    },
   ],
   getApiKeyLink: 'https://huggingface.co/settings/tokens',
 },
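For context, each added entry follows the model-description shape already used throughout PROVIDER_LIST. A hedged sketch of that shape is below; the field names are taken from the hunk, while the interface name is assumed (the real ModelInfo/ProviderInfo types live elsewhere under app/utils and may differ in detail).

// Hedged sketch of the entry shape implied by the diff above.
interface ModelInfo {
  name: string; // model id passed to the provider's API
  label: string; // human-readable label shown in the model picker
  provider: string; // provider key, e.g. 'HuggingFace'
  maxTokenAllowed: number; // per-response token budget for this model
}

// One of the seven new HuggingFace entries, typed against the sketch:
const qwenCoder: ModelInfo = {
  name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
  label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
  provider: 'HuggingFace',
  maxTokenAllowed: 8000,
};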
app/utils/logger.ts CHANGED
@@ -11,7 +11,7 @@ interface Logger {
   setLevel: (level: DebugLevel) => void;
 }
 
-let currentLevel: DebugLevel = (import.meta.env.VITE_LOG_LEVEL ?? import.meta.env.DEV) ? 'debug' : 'info';
+let currentLevel: DebugLevel = import.meta.env.VITE_LOG_LEVEL ?? import.meta.env.DEV ? 'debug' : 'info';
 
 const isWorker = 'HTMLRewriter' in globalThis;
 const supportsColor = !isWorker;
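The removed parentheses are redundant: nullish coalescing (??) binds tighter than the conditional operator, so both spellings parse as (VITE_LOG_LEVEL ?? DEV) ? 'debug' : 'info'. The merge only normalizes formatting; behavior is unchanged in either form (note that any set VITE_LOG_LEVEL yields 'debug' regardless of its value, but that predates this commit). A minimal check, with hypothetical stand-ins for the env values:

// Stand-ins for import.meta.env.VITE_LOG_LEVEL and import.meta.env.DEV.
const logLevel: string | undefined = undefined;
const dev = true;

// `??` has higher precedence than `?:`, so these are the same expression.
const withParens = (logLevel ?? dev) ? 'debug' : 'info';
const withoutParens = logLevel ?? dev ? 'debug' : 'info';

console.log(withParens === withoutParens); // true ('debug' in this case)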
package.json CHANGED
@@ -12,7 +12,7 @@
     "test": "vitest --run",
     "test:watch": "vitest",
     "lint": "eslint --cache --cache-location ./node_modules/.cache/eslint app",
-    "lint:fix": "pnpm run lint -- --fix",
+    "lint:fix": "npm run lint -- --fix && prettier app --write",
     "start": "bindings=$(./bindings.sh) && wrangler pages dev ./build/client $bindings",
     "dockerstart": "bindings=$(./bindings.sh) && wrangler pages dev ./build/client $bindings --ip 0.0.0.0 --port 5173 --no-show-interactive-dev-session",
     "dockerrun": "docker run -it -d --name bolt-ai-live -p 5173:5173 --env-file .env.local bolt-ai",