Rooni committed on
Commit
61515b8
·
verified ·
1 Parent(s): e78c52b

Update server.js

Browse files
Files changed (1) hide show
  1. server.js +36 -30
server.js CHANGED
@@ -1,7 +1,7 @@
1
  require('dotenv').config();
2
  const express = require('express');
3
  const rateLimit = require('express-rate-limit');
4
- const { HfInference } = require('@huggingface/inference');
5
 
6
  const app = express();
7
  app.use(express.json());
@@ -36,35 +36,41 @@ app.post('/update', async (req, res) => {
36
  });
37
 
38
  async function sendRequest(prompt, prs) {
39
- const hf_api_key = getRandomApiKey();
40
- const client = new HfInference(hf_api_key);
41
-
42
- try {
43
- const chatCompletion = await client.chatCompletion({
44
- model: "Qwen/Qwen2.5-Coder-32B-Instruct",
45
- messages: [
46
- {
47
- role: "system",
48
- content: prs
49
- },
50
- {
51
- role: "user",
52
- content: prompt
53
- }
54
- ],
55
- max_tokens: 1200,
56
- });
57
-
58
- if (chatCompletion && chatCompletion.choices && chatCompletion.choices.length > 0 && chatCompletion.choices[0].message) {
59
- return chatCompletion.choices[0].message.content.trim();
60
- } else {
61
- throw new Error("Ошибка прочтения ответа");
62
- }
63
- } catch (error) {
64
- console.error("Ошибка при обращении к Hugging Face:", error);
65
- throw new Error("Ошибка при генерации");
 
 
66
  }
 
 
 
67
  }
 
68
 
69
  app.post('/pl', async (req, res) => {
70
  const prompt = req.body.prompt;
@@ -84,7 +90,7 @@ app.post('/pl', async (req, res) => {
84
  const content = await sendRequest(prompt, prs);
85
  res.json({ content });
86
  } catch (error) {
87
- res.json({ content: `{"error":"", "title":"Ошибка", "text":"Произошла ошибка на сервере. (${error.message})", "okb":"Ок", "oklink":"", "cancelable":"true"}` });
88
  }
89
  });
90
 
@@ -114,4 +120,4 @@ app.post('/plbeta', async (req, res) => {
114
  const port = 7860;
115
  app.listen(port, () => {
116
  console.log(`API сервер запущен на порту ${port}`);
117
- });
 
1
  require('dotenv').config();
2
  const express = require('express');
3
  const rateLimit = require('express-rate-limit');
4
+ const { Configuration, OpenAIApi } = require('openai');
5
 
6
  const app = express();
7
  app.use(express.json());
 
36
  });
37
 
38
  async function sendRequest(prompt, prs) {
39
+ const apiKey = getRandomApiKey();
40
+
41
+ const configuration = new Configuration({
42
+ apiKey: apiKey,
43
+ basePath: process.env.BASE_URL || undefined,
44
+ });
45
+
46
+ const openai = new OpenAIApi(configuration);
47
+
48
+ try {
49
+ const response = await openai.createChatCompletion({
50
+ model: process.env.MODEL_NAME || 'gpt-3.5-turbo',
51
+ messages: [
52
+ {
53
+ role: 'system',
54
+ content: prs
55
+ },
56
+ {
57
+ role: 'user',
58
+ content: prompt
59
+ }
60
+ ],
61
+ max_tokens: 1200,
62
+ });
63
+
64
+ if (response.data && response.data.choices && response.data.choices.length > 0) {
65
+ return response.data.choices[0].message.content.trim();
66
+ } else {
67
+ throw new Error('Ошибка прочтения ответа');
68
  }
69
+ } catch (error) {
70
+ console.error('Ошибка при обращении к OpenAI:', error.message);
71
+ throw new Error('Ошибка при генерации');
72
  }
73
+ }
74
 
75
  app.post('/pl', async (req, res) => {
76
  const prompt = req.body.prompt;
 
90
  const content = await sendRequest(prompt, prs);
91
  res.json({ content });
92
  } catch (error) {
93
+ res.json({ content: `{"error":"", "title":"Ошибка", "text":"Произошла ошибка на сервере. (${error.message})", "okb":"Ок", "oklink":"", "cancelable":"true"}` });
94
  }
95
  });
96
 
 
120
  const port = 7860;
121
  app.listen(port, () => {
122
  console.log(`API сервер запущен на порту ${port}`);
123
+ });