File size: 9,198 Bytes
e97be0e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 |
import zod from 'https://cdn.jsdelivr.net/npm/[email protected]/+esm'
/**
 * Fills the prompt template with the provided arguments.
 * Placeholders use the `{{key}}` syntax.
 * @param {String} template - Template text containing `{{key}}` placeholders.
 * @param {Object} args - Map of placeholder names to replacement values.
 * @returns {String} The template with every placeholder substituted.
 */
export function formatTemplate(template, args) {
    return template.replace(/\{\{(\w+)\}\}/g, (_, key) => {
        // 'key' is the content of the first capture group (e.g. "name" for "{{name}}").
        // Object.hasOwn avoids picking up inherited properties such as "constructor".
        if (Object.hasOwn(args, key))
            return args[key];
        // Unknown key: substitute an empty string so no raw placeholder leaks into the prompt.
        return "";
    });
}
/**
 * Retrieves the prompt template for the specified task from the server.
 * @param {String} task - Task identifier, used as the URL path segment.
 * @returns {Promise<String>} The raw template text.
 * @throws {Error} When the server responds with a non-2xx status
 *   (the original silently returned the error page body as the template).
 */
export async function retrieveTemplate(task) {
    const response = await fetch(`/prompt/${encodeURIComponent(task)}`);
    if (!response.ok)
        throw new Error(`Failed to retrieve template "${task}": HTTP ${response.status}`);
    return await response.text();
}
/**
 * Launches a prior-art deep search on the server for the given topics.
 * @param {Array} topics - Topics to search prior art for.
 * @returns {Promise<Object>} Parsed JSON search results from the server.
 * @throws {Error} When the server responds with a non-2xx status.
 */
async function performDeepSearch(topics) {
    const response = await fetch('/solutions/search_prior_art', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ topics: topics })
    });
    if (!response.ok)
        throw new Error(`Deep search failed with status ${response.status}`);
    // BUG FIX: the original parsed the results into a local and never returned
    // them, so every caller received undefined.
    return await response.json();
}
/**
 * Generates a completion with the specified LLM.
 * @param {String} providerUrl - URL of the LLM provider (OpenAI-compatible).
 * @param {String} modelName - Name of the model to call.
 * @param {String} apiKey - API key to use.
 * @param {Array<{role: string, content: string}>} messages - Messages to pass to the model.
 * @param {Number} temperature - Sampling temperature for the generation.
 * @returns {Promise<String>} The generated message content.
 * @throws {Error} On a non-2xx response or an unexpected payload shape.
 */
export async function generateCompletion(providerUrl, modelName, apiKey, messages, temperature = 0.5) {
    const genEndpoint = providerUrl + "/chat/completions";
    try {
        const response = await fetch(genEndpoint, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
                'Authorization': `Bearer ${apiKey}`, // OpenAI-like authorization header
            },
            body: JSON.stringify({
                model: modelName,
                messages: messages,
                temperature: temperature,
            }),
        });
        if (!response.ok) {
            // Parse the error payload when possible; a non-JSON body must not
            // throw here and mask the real HTTP status.
            const errorData = await response.json().catch(() => ({}));
            throw new Error(`API request failed with status ${response.status}: ${errorData.error?.message || 'Unknown error'}`);
        }
        const data = await response.json();
        const content = data.choices?.[0]?.message?.content;
        // BUG FIX: the original silently returned undefined when the payload
        // did not match the expected shape.
        if (content == null)
            throw new Error('Unexpected response format: missing choices[0].message.content');
        return content;
    } catch (error) {
        console.error("Error calling private LLM :", error);
        throw error;
    }
}
/**
 * Generates a structured (JSON) completion with the specified LLM and
 * validates it against a Zod schema.
 * @param {String} providerUrl - URL of the LLM provider (OpenAI-compatible).
 * @param {String} modelName - Name of the model to call.
 * @param {String} apiKey - API key to use.
 * @param {Array<{role: string, content: string}>} messages - Messages to pass to the model.
 * @param {Object} schema - Zod schema used to validate the parsed JSON output.
 * @param {Number} temperature - Sampling temperature for the generation.
 * @returns {Promise<Object>} The schema-validated parsed output.
 * @throws {Error} On a non-2xx response, an unexpected payload shape,
 *   invalid JSON, or schema validation failure.
 */
export async function generateStructuredCompletion(providerUrl, modelName, apiKey, messages, schema, temperature = 0.5) {
    const genEndpoint = providerUrl + "/chat/completions";
    try {
        const response = await fetch(genEndpoint, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
                'Authorization': `Bearer ${apiKey}`,
            },
            body: JSON.stringify({
                model: modelName,
                messages: messages,
                temperature: temperature,
                response_format: { type: "json_object" }
            }),
        });
        if (!response.ok) {
            // Tolerate non-JSON error bodies so the HTTP status is never masked.
            const errorData = await response.json().catch(() => ({}));
            throw new Error(`API request failed with status ${response.status}: ${errorData.error?.message || 'Unknown error'}`);
        }
        const data = await response.json();
        const content = data.choices?.[0]?.message?.content;
        if (content == null)
            throw new Error('Unexpected response format: missing choices[0].message.content');
        // Strip optional Markdown code fences the model may wrap around the JSON.
        // More robust than the original literal .replace: handles leading
        // whitespace and a bare ``` fence without the "json" tag.
        const jsonText = content.trim().replace(/^```(?:json)?\s*/, '').replace(/\s*```$/, '');
        const parsedJSON = JSON.parse(jsonText);
        // Validate the parsed payload with the caller-supplied Zod schema.
        return schema.parse(parsedJSON);
    } catch (error) {
        console.error("Error calling private LLM :", error);
        throw error;
    }
}
/**
 * Retrieves a list of available models from an OpenAI-compatible API using fetch.
 * Models whose id contains "embedding" (case-insensitive) are filtered out.
 *
 * @param {string} providerUrl The base URL of the OpenAI-compatible API endpoint (e.g., "http://localhost:8000/v1").
 * @param {string} apiKey The API key for authentication.
 * @returns {Promise<Array<string>>} A promise that resolves with an array of model names, or rejects with an error.
 */
export async function getModelList(providerUrl, apiKey) {
    try {
        // Construct the full URL for the models endpoint
        const modelsUrl = `${providerUrl}/models`;
        // Make a GET request to the models endpoint using fetch
        const response = await fetch(modelsUrl, {
            method: 'GET', // Explicitly state the method
            headers: {
                'Authorization': `Bearer ${apiKey}`, // OpenAI-compatible authorization header
                'Content-Type': 'application/json',
            },
        });
        // Check if the request was successful (status code 200-299)
        if (!response.ok) {
            // If the response is not OK, try to get more error details
            const errorData = await response.json().catch(() => ({})); // Attempt to parse JSON error, fallback to empty object
            throw new Error(`HTTP error! Status: ${response.status}, Message: ${errorData.message || response.statusText}`);
        }
        // Parse the JSON response body
        const data = await response.json();
        // OpenAI-compatible APIs return a 'data' array where each item
        // represents a model and has an 'id' property.
        if (data && Array.isArray(data.data)) {
            const allModelNames = data.data.map(model => model.id);
            // Filter out models containing "embedding" (case-insensitive)
            const filteredModelNames = allModelNames.filter(modelName =>
                !modelName.toLowerCase().includes('embedding')
            );
            return filteredModelNames;
        } else {
            // Handle cases where the response format is unexpected
            throw new Error('Unexpected response format from the API. Could not find model list.');
        }
    } catch (error) {
        console.error('Error fetching model list:', error.message);
        // Re-throw the error to allow the caller to handle it
        throw error;
    }
}
// # ========================================================================================== Idea assessment logic ==================================================================
// Zod schema describing the structured output expected from the extraction
// prompt: an overall verdict, a prose summary, and a list of insight strings.
const StructuredAssessmentOutput = zod.object({
    final_verdict: zod.string(),
    summary: zod.string(),
    insights: zod.array(zod.string()),
});
/**
 * Runs a full LLM assessment of a solution, then extracts a structured
 * summary (verdict, summary, insights) from the free-text report.
 * @param {String} providerUrl - URL of the LLM provider.
 * @param {String} modelName - Name of the model to call.
 * @param {String} apiKey - API key to use.
 * @param {Object} solution - Solution holding problem/solution descriptions.
 * @param {String} assessment_rules - Notation criteria for the assessment.
 * @param {String} portfolio_info - Business/portfolio context.
 * @returns {Promise<{assessment_full: String, extracted_info: Object}>}
 */
export async function assessSolution(providerUrl, modelName, apiKey, solution, assessment_rules, portfolio_info) {
    // Step 1: produce the free-text assessment report.
    const assessPrompt = formatTemplate(await retrieveTemplate("assess"), {
        notation_criterias: assessment_rules,
        business: portfolio_info,
        problem_description: solution.problem_description,
        solution_description: solution.solution_description,
    });
    const assessment_full = await generateCompletion(providerUrl, modelName, apiKey, [
        { role: "user", content: assessPrompt },
    ]);
    // Step 2: extract the structured verdict from that report.
    const extractPrompt = formatTemplate(await retrieveTemplate("extract"), {
        report: assessment_full,
        response_schema: zod.toJSONSchema(StructuredAssessmentOutput),
    });
    const extracted_info = await generateStructuredCompletion(
        providerUrl,
        modelName,
        apiKey,
        [{ role: "user", content: extractPrompt }],
        StructuredAssessmentOutput,
    );
    return { assessment_full, extracted_info };
}
/**
 * Refines a solution's description with the LLM, guided by assessment insights.
 * @param {String} providerUrl - URL of the LLM provider.
 * @param {String} modelName - Name of the model to call.
 * @param {String} apiKey - API key to use.
 * @param {Object} solution - Solution holding problem/solution descriptions.
 * @param {Array<{text: String}>} insights - Insights to address in the refinement.
 * @param {String} assessment_rules - Notation criteria (currently unused by the prompt).
 * @param {String} portfolio_info - Business/portfolio context.
 * @returns {Promise<Object>} A copy of the solution with a refined description.
 */
export async function refineSolution(providerUrl, modelName, apiKey, solution, insights, assessment_rules, portfolio_info) {
    const template = await retrieveTemplate("refine");
    const refine_template = formatTemplate(template, {
        "problem_description": solution.problem_description,
        "solution_description": solution.solution_description,
        // BUG FIX: the original join("\n -") left the first insight without a
        // bullet and mis-indented the rest; build a proper bulleted list.
        "insights": insights.map(i => `- ${i.text}`).join("\n"),
        "business_info": portfolio_info,
    });
    const refined_idea = await generateCompletion(providerUrl, modelName, apiKey, [{ role: "user", content: refine_template }]);
    // Deep-copy so the caller's original solution object is never mutated.
    const newSolution = structuredClone(solution);
    newSolution.solution_description = refined_idea;
    return newSolution;
}