Spaces:
Running
Running
Commit · 0378a63
1 Parent(s): 436a794
use 15 max tokens
Browse files
src/routes/+page.svelte +5 -6
src/routes/+page.svelte
CHANGED
|
@@ -4,7 +4,7 @@
|
|
| 4 |
import * as webllm from "@mlc-ai/web-llm";
|
| 5 |
import { onMount } from 'svelte';
|
| 6 |
|
| 7 |
-
let selectedModel = "smollm-360M-instruct-add-basics-
|
| 8 |
|
| 9 |
let engine: webllm.MLCEngineInterface;
|
| 10 |
let isLoading = false;
|
|
@@ -26,9 +26,9 @@
|
|
| 26 |
|
| 27 |
const appConfig: webllm.AppConfig = {
|
| 28 |
model_list: [{
|
| 29 |
-
model: `https://huggingface.co/reach-vb/smollm-360M-instruct-add-basics-
|
| 30 |
-
model_id: 'smollm-360M-instruct-add-basics-
|
| 31 |
-
model_lib: `${webllm.modelLibURLPrefix}${webllm.modelVersion}/SmolLM-360M-Instruct-
|
| 32 |
overrides: { context_window_size: 2048 },
|
| 33 |
},
|
| 34 |
{
|
|
@@ -71,10 +71,9 @@
|
|
| 71 |
console.log("Generating completion:", content);
|
| 72 |
const response = await engine.chat.completions.create({
|
| 73 |
messages: [
|
| 74 |
-
{role:"system", content: "You are a helpful AI assistant. Try your best to answer the users request."},
|
| 75 |
{role: "user", content: content}
|
| 76 |
],
|
| 77 |
-
max_tokens:
|
| 78 |
});
|
| 79 |
|
| 80 |
outputText = response.choices[0].message.content || "";
|
|
|
|
| 4 |
import * as webllm from "@mlc-ai/web-llm";
|
| 5 |
import { onMount } from 'svelte';
|
| 6 |
|
| 7 |
+
let selectedModel = "smollm-360M-instruct-add-basics-q0f32-MLC";
|
| 8 |
|
| 9 |
let engine: webllm.MLCEngineInterface;
|
| 10 |
let isLoading = false;
|
|
|
|
| 26 |
|
| 27 |
const appConfig: webllm.AppConfig = {
|
| 28 |
model_list: [{
|
| 29 |
+
model: `https://huggingface.co/reach-vb/smollm-360M-instruct-add-basics-q0f32-MLC`,
|
| 30 |
+
model_id: 'smollm-360M-instruct-add-basics-q0f32-MLC',
|
| 31 |
+
model_lib: `${webllm.modelLibURLPrefix}${webllm.modelVersion}/SmolLM-360M-Instruct-q0f32-ctx2k_cs1k-webgpu.wasm`,
|
| 32 |
overrides: { context_window_size: 2048 },
|
| 33 |
},
|
| 34 |
{
|
|
|
|
| 71 |
console.log("Generating completion:", content);
|
| 72 |
const response = await engine.chat.completions.create({
|
| 73 |
messages: [
|
|
|
|
| 74 |
{role: "user", content: content}
|
| 75 |
],
|
| 76 |
+
max_tokens: 15,
|
| 77 |
});
|
| 78 |
|
| 79 |
outputText = response.choices[0].message.content || "";
|