import { d as private_env } from './shared-server-49TKSBDM.js';
import { c as redirect, b as base } from './index-JNnR1J8_.js';
import { L as LlamaCppService } from './LlamaCppService-lwZ2ZT0u.js';
import 'fs';
import 'path';
import 'node:dns';

// POST: stream an LLM completion for the prompt in the request body.
// Note: the response carries an SSE content type, but the body is raw
// token text rather than `data:`-framed events.
const POST = async ({ request }) => {
  const body = await request.json();
  // Aborting this controller cancels the in-flight llama.cpp request.
  const abortController = new AbortController();
  const llmService = new LlamaCppService(private_env.LLM_API_URL);
  // predict() resolves to an async-generator factory that yields tokens
  // as the llama.cpp server produces them.
  const llmGenerator = await llmService.predict(body.userprompt, { abortController });
  const stream = new ReadableStream({
    async start(controller) {
      try {
        // Forward each token to the client as soon as it is generated.
        for await (const output of llmGenerator({ prompt: body.userprompt })) {
          controller.enqueue(output.token.text);
        }
      } catch (error) {
        if (error.name === "AbortError") {
          console.log("Request was aborted during LLMServer prediction.");
        } else {
          console.error("Error during LLMServer prediction:", error);
        }
      }
      controller.close();
    },
    cancel() {
      // The client disconnected; stop generation server-side.
      console.log("ReadableStream canceled and aborted");
      abortController.abort();
    }
  });
  return new Response(stream, {
    headers: {
      "content-type": "text/event-stream"
    }
  });
};
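
// --- Illustrative sketch only ---
// The real LlamaCppService lives in the bundled chunk
// LlamaCppService-lwZ2ZT0u.js and its internals are not shown here. The
// hypothetical class below merely mirrors the contract the POST handler
// relies on: predict(prompt, { abortController }) resolves to an
// async-generator factory whose yielded items expose `token.text`. It
// assumes llama.cpp's HTTP server `/completion` endpoint with
// `stream: true`, which emits lines of the form `data: {"content": "..."}`.
class LlamaCppServiceSketch {
  constructor(apiUrl) {
    this.apiUrl = apiUrl;
  }

  // The handler passes the prompt both here and to the returned factory;
  // this sketch uses the factory's argument.
  async predict(prompt, { abortController }) {
    const { apiUrl } = this;
    return async function* ({ prompt }) {
      const res = await fetch(`${apiUrl}/completion`, {
        method: "POST",
        headers: { "content-type": "application/json" },
        body: JSON.stringify({ prompt, stream: true }),
        signal: abortController.signal
      });
      const reader = res.body.getReader();
      const decoder = new TextDecoder();
      let buffer = "";
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        buffer = lines.pop() ?? "";
        for (const line of lines) {
          if (!line.startsWith("data: ")) continue;
          const payload = JSON.parse(line.slice("data: ".length));
          // Adapt llama.cpp's `content` field to the `{ token: { text } }`
          // shape the handler above consumes.
          yield { token: { text: payload.content } };
        }
      }
    };
  }
}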
// Direct browser navigation (GET) is not supported; redirect to the app root.
const GET = async () => {
  throw redirect(302, `${base}/`);
};

export { GET, POST };
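
// --- Illustrative sketch only: consuming this endpoint from a browser ---
// The route path below is a placeholder (this compiled chunk does not
// reveal which route it serves); substitute the actual path. Aborting the
// fetch is what fires the stream's cancel() handler above and stops
// generation server-side. The body is read as plain text chunks, matching
// what the POST handler enqueues.
async function streamCompletionSketch(userprompt, onToken, signal) {
  const res = await fetch("/api/completion" /* placeholder path */, {
    method: "POST",
    headers: { "content-type": "application/json" },
    body: JSON.stringify({ userprompt }),
    signal
  });
  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    onToken(decoder.decode(value, { stream: true }));
  }
}
// Example usage:
//   const ac = new AbortController();
//   streamCompletionSketch("Hello", (t) => (output += t), ac.signal);
//   ac.abort(); // cancel mid-generation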
//# sourceMappingURL=_server.ts-sBA9AkSc.js.map