Update knowledge_engine.py
knowledge_engine.py  CHANGED  (+184 -184)

@@ -1,184 +1,184 @@
 import os
 import pickle
 from typing import List, Dict, Any
 from datetime import datetime
 from concurrent.futures import ThreadPoolExecutor

 from config import Config

 # Core ML/AI libraries
 from langchain_community.document_loaders import TextLoader, DirectoryLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.vectorstores import FAISS
 from langchain_community.embeddings import OllamaEmbeddings
 from langchain.chains import RetrievalQA
 from langchain.prompts import PromptTemplate
 from langchain_community.llms import Ollama
-from
+from langchain_community.retrievers import BM25Retriever


 class KnowledgeManager:
     """Main knowledge management class handling document processing and Q&A with CoT & MoE routing"""

     def __init__(self):
         Config.setup_dirs()
         self.embeddings = OllamaEmbeddings(model="mxbai-embed-large")
         self.vector_db, self.bm25_retriever = self._init_retrievers()
         self.qa_chain = self._create_moe_qa_chain()

     def _init_retrievers(self):
         faiss_index_path = Config.VECTOR_STORE_PATH / "index.faiss"
         faiss_pkl_path = Config.VECTOR_STORE_PATH / "index.pkl"

         if faiss_index_path.exists() and faiss_pkl_path.exists():
             try:
                 vector_db = FAISS.load_local(
                     str(Config.VECTOR_STORE_PATH),
                     self.embeddings,
                     allow_dangerous_deserialization=True
                 )
                 if Config.BM25_STORE_PATH.exists():
                     with open(Config.BM25_STORE_PATH, "rb") as f:
                         bm25_retriever = pickle.load(f)
                     return vector_db, bm25_retriever
             except Exception as e:
                 print(f"[!] Error loading existing vector store: {e}. Rebuilding...")

         return self._build_retrievers_from_documents()

     def _build_retrievers_from_documents(self):
         if not any(Config.KNOWLEDGE_DIR.glob("**/*.txt")):
             print("[i] No knowledge files found. Creating default base...")
             self._create_default_knowledge()

         loader = DirectoryLoader(
             str(Config.KNOWLEDGE_DIR),
             glob="**/*.txt",
             loader_cls=TextLoader,
             loader_kwargs={'encoding': 'utf-8'}
         )
         docs = loader.load()
         splitter = RecursiveCharacterTextSplitter(
             chunk_size=Config.CHUNK_SIZE,
             chunk_overlap=Config.CHUNK_OVERLAP,
             separators=["\n\n", "\n", ". ", "! ", "? ", "; ", " ", ""]
         )
         chunks = splitter.split_documents(docs)

         vector_db = FAISS.from_documents(chunks, self.embeddings)
         vector_db.save_local(str(Config.VECTOR_STORE_PATH))

         bm25_retriever = BM25Retriever.from_documents(chunks)
         bm25_retriever.k = Config.MAX_CONTEXT_CHUNKS

         with open(Config.BM25_STORE_PATH, "wb") as f:
             pickle.dump(bm25_retriever, f)

         return vector_db, bm25_retriever

     def _create_default_knowledge(self):
         default_text = """Sirraya xBrain - Advanced AI Platform\n\nCreated by Amir Hameed.\n\nFeatures:\n- Hybrid Retrieval (Vector + BM25)\n- LISA Assistant\n- FAISS, Ollama, BM25 Integration"""
         with open(Config.KNOWLEDGE_DIR / "sirraya_xbrain.txt", "w", encoding="utf-8") as f:
             f.write(default_text)

     def _parallel_retrieve(self, question: str):
         """Parallel retrieval execution: simulates Mixture of Experts routing"""

         def retrieve_with_bm25():
             return self.bm25_retriever.get_relevant_documents(question)

         def retrieve_with_vector():
             # Similarity score threshold of 0.83; lower it (e.g., to 0.3) for broader doc retrieval (adjust as needed)
             retriever = self.vector_db.as_retriever(
                 search_type="similarity_score_threshold",
                 search_kwargs={"k": Config.MAX_CONTEXT_CHUNKS, "score_threshold": 0.83}
             )
             return retriever.get_relevant_documents(question)

         with ThreadPoolExecutor(max_workers=2) as executor:
             bm25_future = executor.submit(retrieve_with_bm25)
             vector_future = executor.submit(retrieve_with_vector)
             bm25_results = bm25_future.result()
             vector_results = vector_future.result()

         # Combine results; duplicates are possible, consider deduplication if needed
         return vector_results + bm25_results

     def _create_moe_qa_chain(self):
         if not self.vector_db or not self.bm25_retriever:
             return None

         prompt_template = """You are LISA, an AI assistant for Sirraya xBrain. Answer using the context below:

 Context:
 {context}

 Question: {question}

 Instructions:
 - Use only the context.
 - Be accurate and helpful.
 - If unsure, say: "I don’t have that information in my knowledge base."

 Answer:"""

         return RetrievalQA.from_chain_type(
             llm=Ollama(model="phi", temperature=0.1),
             chain_type="stuff",
             retriever=self.vector_db.as_retriever(search_kwargs={"k": 1}),  # Dummy retriever to satisfy LangChain
             chain_type_kwargs={
                 "prompt": PromptTemplate(
                     template=prompt_template,
                     input_variables=["context", "question"]
                 )
             },
             return_source_documents=True
         )

     def query(self, question: str) -> Dict[str, Any]:
         """Query system using CoT + MoE logic"""
         if not self.qa_chain:
             return {
                 "answer": "Knowledge system not initialized. Please reload.",
                 "processing_time": 0,
                 "source_chunks": []
             }

         try:
             start_time = datetime.now()
             docs = self._parallel_retrieve(question)

             # If no docs are found, fall back to a retriever without a threshold for testing
             if not docs:
                 retriever = self.vector_db.as_retriever(search_kwargs={"k": Config.MAX_CONTEXT_CHUNKS})
                 docs = retriever.get_relevant_documents(question)

             # Use invoke() for chains with multiple outputs
             result = self.qa_chain.invoke({"input_documents": docs, "query": question})

             processing_time = (datetime.now() - start_time).total_seconds() * 1000

             return {
                 "answer": result.get("result", ""),
                 "processing_time": processing_time,
                 "source_chunks": result.get("source_documents", [])
             }
         except Exception as e:
             print(f"[!] Query error: {e}")
             return {
                 "answer": f"Error: {e}",
                 "processing_time": 0,
                 "source_chunks": []
             }

     def get_knowledge_files_count(self) -> int:
         return len(list(Config.KNOWLEDGE_DIR.glob("**/*.txt"))) if Config.KNOWLEDGE_DIR.exists() else 0

     def save_uploaded_file(self, uploaded_file, filename: str) -> bool:
         try:
             with open(Config.KNOWLEDGE_DIR / filename, "wb") as f:
                 f.write(uploaded_file.getbuffer())
             return True
         except Exception as e:
             print(f"[!] File save error: {e}")
             return False
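
For orientation, a minimal usage sketch of the class as committed. It assumes `config.py` supplies the `Config` paths and chunking constants the module imports, and that a local Ollama server is serving the `phi` and `mxbai-embed-large` models; the question string is only an example.

    from knowledge_engine import KnowledgeManager

    km = KnowledgeManager()                         # loads or rebuilds the FAISS + BM25 stores
    print(km.get_knowledge_files_count())           # number of .txt files in the knowledge base

    response = km.query("Who created Sirraya xBrain?")
    print(response["answer"])                       # answer text from the RetrievalQA chain
    print(f"{response['processing_time']:.0f} ms")  # wall-clock latency
    for doc in response["source_chunks"]:           # LangChain Document objects
        print(doc.metadata.get("source"))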
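
The combine step at the end of `_parallel_retrieve` concatenates both experts' results, and the in-code comment notes that duplicates are possible. A small order-preserving dedup pass could be applied to the merged list; the helper below is a sketch, not part of this commit, and assumes keying on `Document.page_content` is an acceptable identity test.

    def _dedupe(docs):
        # Hypothetical helper (not in the committed file): keeps the first
        # copy of each chunk, so vector-ranked hits win when BM25 returns
        # the same chunk again.
        seen = set()
        unique = []
        for doc in docs:
            if doc.page_content not in seen:
                seen.add(doc.page_content)
                unique.append(doc)
        return unique

    # Drop-in replacement for the final line of _parallel_retrieve:
    # return _dedupe(vector_results + bm25_results)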