Switch generateTitle to use Hugging Face Llama model
Replaces OpenAI GPT-3.5-turbo with Hugging Face's meta-llama/Llama-3.1-8B-Instruct model for the generateTitle function. Updates the import and API call accordingly.
app/actions.ts  CHANGED  (+3 -2)
@@ -1,6 +1,7 @@
 "use server";
 
 import { openai } from "@/lib/openai-client";
+import { hf } from "@/lib/hf-client";
 import { z } from "zod";
 import { startMcpSandbox } from "@/lib/mcp-sandbox";
 
@@ -48,8 +49,8 @@ export async function generateTitle(messages: any[]): Promise<string> {
     content: Array.isArray(m.content) ? m.content.join("\n") : m.content,
   }));
 
-  const response = await openai.chat.completions.create({
-    model: "gpt-3.5-turbo",
+  const response = await hf.chat.completions.create({
+    model: "meta-llama/Llama-3.1-8B-Instruct",
     max_tokens: 30,
     temperature: 0.4,
     messages: [
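The new import pulls `hf` from `@/lib/hf-client`, a module that is not part of this commit. Because the call site keeps the `chat.completions.create(...)` shape, the client is presumably the OpenAI SDK pointed at Hugging Face's OpenAI-compatible router; a minimal sketch, assuming an HF_TOKEN environment variable and the router endpoint:

// lib/hf-client.ts (hypothetical sketch, not part of this commit)
// Assumes the project reuses the OpenAI SDK against Hugging Face's
// OpenAI-compatible router and reads an HF_TOKEN from the environment.
import OpenAI from "openai";

// Exporting the instance as `hf` matches the import added in app/actions.ts.
export const hf = new OpenAI({
  baseURL: "https://router.huggingface.co/v1",
  apiKey: process.env.HF_TOKEN,
});

Keeping the OpenAI client interface means only the client instance and the model name change at the call site, which is exactly what the +3 -2 diff shows.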
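For context, here is a reconstruction of how generateTitle plausibly reads after this change. Only the content mapping, model, max_tokens, temperature, and the start of the messages array are visible in the hunk; the variable name, role mapping, system prompt, and return handling below are assumptions for illustration, not code from the repository.

// Hypothetical post-commit generateTitle; anything outside the visible
// hunk (system prompt, return value) is assumed, not taken from the repo.
"use server";

import { hf } from "@/lib/hf-client";

export async function generateTitle(messages: any[]): Promise<string> {
  // Flatten multi-part message content into plain strings.
  const history = messages.map((m) => ({
    role: m.role,
    content: Array.isArray(m.content) ? m.content.join("\n") : m.content,
  }));

  const response = await hf.chat.completions.create({
    model: "meta-llama/Llama-3.1-8B-Instruct",
    max_tokens: 30,
    temperature: 0.4,
    messages: [
      // Assumed system prompt; the real one sits outside the diff context.
      { role: "system", content: "Write a short title for this conversation." },
      ...history,
    ],
  });

  return response.choices[0]?.message?.content?.trim() ?? "New chat";
}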