Spaces:
Paused
Paused
correction rag model
Browse files- rag_model_optimise.py +4 -2
rag_model_optimise.py
CHANGED
@@ -57,8 +57,7 @@ Contexte :
|
|
57 |
|
58 |
Question initiale : {question}
|
59 |
Question reformulée :"""
|
60 |
-
output = self.llm(prompt, max_tokens=128, stop=["
|
61 |
-
"], stream=False)
|
62 |
reformulated = output["choices"][0]["text"].strip()
|
63 |
logger.info(f"📝 Reformulée avec contexte : {reformulated}")
|
64 |
return reformulated
|
@@ -74,6 +73,9 @@ Question reformulée :"""
|
|
74 |
def rerank_nodes(self, question: str, retrieved_nodes, top_k: int = 3):
|
75 |
logger.info(f"🔍 Re-ranking des {len(retrieved_nodes)} chunks pour la question : « {question} »")
|
76 |
q_emb = self.embed_model.get_query_embedding(question)
|
|
|
|
|
|
|
77 |
scored_nodes = []
|
78 |
|
79 |
for node in retrieved_nodes:
|
|
|
57 |
|
58 |
Question initiale : {question}
|
59 |
Question reformulée :"""
|
60 |
+
output = self.llm(prompt, max_tokens=128, stop=[""], stream=False)
|
|
|
61 |
reformulated = output["choices"][0]["text"].strip()
|
62 |
logger.info(f"📝 Reformulée avec contexte : {reformulated}")
|
63 |
return reformulated
|
|
|
73 |
def rerank_nodes(self, question: str, retrieved_nodes, top_k: int = 3):
|
74 |
logger.info(f"🔍 Re-ranking des {len(retrieved_nodes)} chunks pour la question : « {question} »")
|
75 |
q_emb = self.embed_model.get_query_embedding(question)
|
76 |
+
if q_emb is None:
|
77 |
+
logger.warning("Embedding de la question introuvable")
|
78 |
+
return retrieved_nodes[:top_k]
|
79 |
scored_nodes = []
|
80 |
|
81 |
for node in retrieved_nodes:
|