masadonline committed
Commit 2d95a78 · verified · 1 Parent(s): 9afaedf

Update app.py

Files changed (1):
  1. app.py +119 -134

app.py CHANGED
@@ -9,7 +9,7 @@ from langchain_community.document_loaders import (
     Docx2txtLoader,
     UnstructuredExcelLoader,
     JSONLoader,
-    UnstructuredFileLoader # Generic loader, good for tables
+    UnstructuredFileLoader # Generic loader, good for tables
 )
 from langchain_text_splitters import RecursiveCharacterTextSplitter
 from langchain.embeddings import HuggingFaceEmbeddings
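The body of `get_loader` is not part of this diff, but the import list above implies an extension-based dispatch. A minimal sketch under that assumption — only the four loaders above are confirmed by the diff; the dispatch logic, the `mode="elements"` and `jq_schema` arguments, and the generic fallback are illustrative choices (JSONLoader additionally needs the `jq` package):

```python
import os
from langchain_community.document_loaders import (
    Docx2txtLoader,
    UnstructuredExcelLoader,
    JSONLoader,
    UnstructuredFileLoader,
)

def get_loader_sketch(file_path: str):
    """Illustrative extension-based dispatch; the real get_loader() may differ."""
    ext = os.path.splitext(file_path)[1].lower()
    if ext == ".docx":
        return Docx2txtLoader(file_path)
    if ext in (".xlsx", ".xls"):
        # mode="elements" keeps tables as separate structured elements
        return UnstructuredExcelLoader(file_path, mode="elements")
    if ext == ".json":
        # jq_schema "." treats the whole JSON document as one record
        return JSONLoader(file_path, jq_schema=".", text_content=False)
    # Fall back to the generic loader the import comment recommends for tables
    return UnstructuredFileLoader(file_path)
```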
@@ -21,47 +21,10 @@ from langchain.schema.runnable import RunnablePassthrough
 from langchain.schema.output_parser import StrOutputParser
 
 # --- Configuration ---
-# --- Moved groq_api_key here ---
-load_dotenv()
-groq_api_key = os.getenv("GROQ_API_KEY")
-#groq_api_key = ""
 DOCS_DIR = "docs"
+# Using a local sentence transformer model for embeddings
 EMBEDDING_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
-CACHE_DIR = ".streamlit_cache"
+CACHE_DIR = ".streamlit_cache" # For potential disk-based caching if needed beyond Streamlit's default
-GENERAL_QA_PROMPT = """
-You are an AI assistant for our internal knowledge base.
-Your goal is to provide accurate and concise answers based ONLY on the provided context.
-Do not make up information. If the answer is not found in the context, state that clearly.
-Ensure your answers are directly supported by the text.
-Accuracy is paramount.
-
-Context:
-{context}
-
-Question: {question}
-
-Answer:
-"""
-ORDER_STATUS_PROMPT = """
-You are an AI assistant helping with customer order inquiries.
-Based ONLY on the following retrieved information from our order system and policies:
-{context}
-
-The customer's query is: {question}
-
-Please perform the following steps:
-1. Carefully analyze the context for any order details (Order ID, Customer Name, Status, Items, Dates, etc.).
-2. If an order matching the query (or related to a name in the query) is found in the context:
-   - Address the customer by their name if available in the order details (e.g., "Hello [Customer Name],").
-   - Provide ALL available information about their order, including Order ID, status, items, dates, and any other relevant details found in the context.
-   - Be comprehensive and clear.
-3. If no specific order details are found in the context that match the query, politely state that you couldn't find the specific order information in the provided documents and suggest they contact support for further assistance.
-4. Do NOT invent or infer any information not explicitly present in the context.
-
-Answer:
-"""
-
-
 
 # Create docs and cache directory if they don't exist
 if not os.path.exists(DOCS_DIR):
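For reference, the embedding model named in `EMBEDDING_MODEL_NAME` can be exercised on its own. A quick sketch — the `langchain.embeddings` path matches this file's import, though newer releases deprecate it in favour of `langchain_huggingface`:

```python
from langchain.embeddings import HuggingFaceEmbeddings

embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vector = embeddings.embed_query("What is the return policy?")
print(len(vector))  # all-MiniLM-L6-v2 produces 384-dimensional vectors
```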
@@ -92,7 +55,8 @@ def get_loader(file_path):
     return None
 
 # --- Caching Functions ---
-@st.cache_resource(show_spinner=False) # Disable spinner during initial load
+
+@st.cache_resource(show_spinner="Loading and Processing Documents...")
 def load_and_process_documents(docs_path: str):
     """
     Loads documents from the specified path, processes them, and splits into chunks.
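The decorator change above swaps `show_spinner=False` for a descriptive message; `st.cache_resource` accepts a string there and displays it while the function body runs. A minimal sketch of the caching behaviour being relied on:

```python
import streamlit as st

@st.cache_resource(show_spinner="Loading and Processing Documents...")
def expensive_setup(path: str):
    # Runs once per distinct `path`; later reruns reuse the same object.
    return {"docs_path": path}

first = expensive_setup("docs")
second = expensive_setup("docs")
assert first is second  # same cached object across Streamlit reruns
```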
@@ -110,7 +74,7 @@ def load_and_process_documents(docs_path: str):
 
     for file_path in doc_files:
         try:
-            st.write(f"Processing: {os.path.basename(file_path)}...") # Show progress
+            st.write(f"Processing: {os.path.basename(file_path)}...") # Show progress
             loader = get_loader(file_path)
             if loader:
                 loaded_docs = loader.load()
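Per its docstring, this function ends by splitting the loaded documents into chunks with the `RecursiveCharacterTextSplitter` imported at the top of the file. The exact `chunk_size`/`chunk_overlap` values are not visible in this diff, so the ones below are placeholders:

```python
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter

docs = [Document(page_content="Returns are accepted within 30 days of purchase. " * 40)]
splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)  # assumed values
chunks = splitter.split_documents(docs)
print(len(chunks), "chunks;", len(chunks[0].page_content), "chars in the first")
```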
@@ -136,7 +100,7 @@ def load_and_process_documents(docs_path: str):
     st.success(f"Successfully loaded and processed {len(doc_files)} documents into {len(chunked_documents)} chunks.")
     return chunked_documents
 
-@st.cache_resource(show_spinner=False) # Disable spinner during initial load
+@st.cache_resource(show_spinner="Creating Vector Store (Embeddings)...")
 def create_vector_store(_documents, _embedding_model_name: str):
     """Creates a FAISS vector store from the given documents and embedding model."""
     if not _documents:
@@ -149,12 +113,9 @@ def create_vector_store(_documents, _embedding_model_name: str):
         return vector_store
     except Exception as e:
         st.error(f"Error creating vector store: {e}")
-        # Return an empty FAISS instance instead of None to prevent the AttributeError.
-        embeddings = HuggingFaceEmbeddings(model_name=_embedding_model_name) # Initialize embeddings
-        vector_store = FAISS.from_documents([], embeddings) # Changed from None to FAISS.from_documents
-        return vector_store
+        return None
 
-@st.cache_resource(show_spinner=False) # Disable spinner during initial load
+@st.cache_resource(show_spinner="Initializing LLM...")
 def get_llm(api_key: str, model_name: str = "llama3-8b-8192"): # UPDATED MODEL
     """Initializes the Groq LLM."""
     if not api_key:
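The fallback deleted above built `FAISS.from_documents([], embeddings)` inside the except block, but FAISS has no vector to size the index from when the list is empty, so that path would itself raise; `return None` plus the caller's `if not vector_store:` guard is the sounder recovery. A sketch of the happy path this function wraps, assuming the usual `langchain_community.vectorstores` import (the FAISS import is outside this diff):

```python
from langchain.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document

docs = [Document(page_content="Order 1042 for John Doe shipped on 2024-05-01.")]
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vector_store = FAISS.from_documents(docs, embeddings)
hits = vector_store.similarity_search("status of John Doe's order", k=1)
print(hits[0].page_content)
```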
@@ -166,7 +127,7 @@ def get_llm(api_key: str, model_name: str = "llama3-8b-8192"): # UPDATED MODEL
         # "llama3-70b-8192" (more powerful, potentially slower)
         # "gemma-7b-it"
         llm = ChatGroq(temperature=0, groq_api_key=api_key, model_name=model_name)
-        st.sidebar.info(f"LLM Initialized: {model_name}") # Add info about which model is used
+        st.sidebar.info(f"LLM Initialized: {model_name}") # Add info about which model is used
         return llm
     except Exception as e:
         st.error(f"Error initializing Groq LLM: {e}")
@@ -186,7 +147,8 @@ def get_rag_chain(llm, retriever, prompt_template):
 
 # --- Main Application Logic ---
 def main():
-
+    load_dotenv()
+    groq_api_key = os.getenv("GROQ_API_KEY")
 
     # --- UI Setup ---
     st.set_page_config(page_title="Internal Knowledge Base AI", layout="wide", initial_sidebar_state="expanded")
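This hunk moves environment loading from module scope (deleted in the configuration hunk above) into `main()`, so `groq_api_key` becomes a local variable. The mechanics being relied on:

```python
import os
from dotenv import load_dotenv

load_dotenv()  # reads key=value pairs from ./.env into os.environ (no-op if the file is absent)
groq_api_key = os.getenv("GROQ_API_KEY")  # None when unset, which trips the st.stop() guard below
```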
@@ -210,101 +172,124 @@ def main():
     </style>
     """, unsafe_allow_html=True)
 
-    st.title("📚 Internal Knowledge Base AI 💡")
+    st.title("📚 Internal Knowledge Base AI ")
 
     st.sidebar.header("System Status")
     status_placeholder = st.sidebar.empty()
     status_placeholder.info("Initializing...")
 
+
     if not groq_api_key:
         status_placeholder.error("GROQ API Key not configured. Application cannot start.")
         st.stop()
 
-    # --- Initialize session state ---
-    if "app_initialized" not in st.session_state:
-        st.session_state.app_initialized = False
-
-    # --- Start Button ---
-    if not st.session_state.app_initialized:
-        if st.button("Start"): # Create a start button
-            st.session_state.app_initialized = True # set the session state to true
-            st.rerun() # Rerun the app to trigger the knowledge base loading
-
-    # --- Knowledge Base Loading and LLM Initialization ---
-    if st.session_state.app_initialized: # only run if the app has been initialized
-        with st.spinner("Knowledge Base is loading... Please wait."):
-            start_time = time.time()
-            processed_documents = load_and_process_documents(DOCS_DIR)
-            if not processed_documents:
-                status_placeholder.error("Failed to load or process documents. Check logs and `docs` folder.")
-                st.stop()
-
-            vector_store = create_vector_store(processed_documents, EMBEDDING_MODEL_NAME)
-            if not vector_store:
-                status_placeholder.error("Failed to create vector store. Application cannot proceed.")
-                st.stop()
-
-            # Pass the selected model to get_llm
-            llm = get_llm(groq_api_key, model_name="llama3-8b-8192") # Hardcoded to use llama3-8b-8192
-            if not llm:
-                # Error is already shown by get_llm, but update status_placeholder too
-                status_placeholder.error("Failed to initialize LLM. Application cannot proceed.")
-                st.stop()
-
-            end_time = time.time()
-            # status_placeholder is updated by get_llm or on success below
-            status_placeholder.success(f"Application Ready! (Loaded in {end_time - start_time:.2f}s)")
-
-        retriever = vector_store.as_retriever(search_kwargs={"k": 5})
-
-        # --- Query Input and Response ---
-        st.markdown("---")
-        st.subheader("Ask a question about our documents:")
-
-        if "messages" not in st.session_state:
-            st.session_state.messages = []
-
-        query = st.text_input("Enter your question:", key="query_input", placeholder="e.g., 'What is the return policy?' or 'Status of order for John Doe?'")
-
-        if st.button("Submit", key="submit_button"):
-            if query:
-                st.session_state.messages.append({"role": "user", "content": query})
-
-                current_model_info = st.sidebar.empty() # Placeholder for current mode info
-
-                if "order" in query.lower() and (
-                        "status" in query.lower() or "track" in query.lower() or "update" in query.lower() or any(
-                        name_part.lower() in query.lower() for name_part in ["customer", "client", "name"])):
-                    active_prompt_template = ORDER_STATUS_PROMPT
-                    current_model_info.info("Mode: Order Status Query")
-                else:
-                    active_prompt_template = GENERAL_QA_PROMPT
-                    current_model_info.info("Mode: General Query")
-
-                rag_chain = get_rag_chain(llm, retriever, active_prompt_template)
-
-                with st.spinner("Thinking..."):
-                    try:
-                        response = rag_chain.invoke(query)
-                        st.session_state.messages.append({"role": "assistant", "content": response})
-                    except Exception as e:
-                        st.error(f"Error during RAG chain invocation: {e}")
-                        response = "Sorry, I encountered an error while processing your request."
-                        st.session_state.messages.append({"role": "assistant", "content": response})
-            else:
-                st.warning("Please enter a question.")
-
-        st.markdown("---")
-        st.subheader("Response:")
-        response_area = st.container()
-        # Ensure response_area is robust against empty messages or incorrect last role
-        last_assistant_message = "Ask a question to see the answer here."
-        if st.session_state.messages and st.session_state.messages[-1]['role'] == 'assistant':
-            last_assistant_message = st.session_state.messages[-1]['content']
-
-        response_area.markdown(f"<div class='response-area'>{last_assistant_message}</div>", unsafe_allow_html=True)
+    # --- Knowledge Base Loading ---
+    with st.spinner("Knowledge Base is loading... Please wait."):
+        start_time = time.time()
+        processed_documents = load_and_process_documents(DOCS_DIR)
+        if not processed_documents:
+            status_placeholder.error("Failed to load or process documents. Check logs and `docs` folder.")
+            st.stop()
+
+        vector_store = create_vector_store(processed_documents, EMBEDDING_MODEL_NAME)
+        if not vector_store:
+            status_placeholder.error("Failed to create vector store. Application cannot proceed.")
+            st.stop()
+
+        # Pass the selected model to get_llm
+        llm = get_llm(groq_api_key, model_name="llama3-8b-8192") # Hardcoded to use llama3-8b-8192
+        if not llm:
+            # Error is already shown by get_llm, but update status_placeholder too
+            status_placeholder.error("Failed to initialize LLM. Application cannot proceed.")
+            st.stop()
+
+        end_time = time.time()
+        # status_placeholder is updated by get_llm or on success below
+        status_placeholder.success(f"Application Ready! (Loaded in {end_time - start_time:.2f}s)")
+
+    retriever = vector_store.as_retriever(search_kwargs={"k": 5})
+
+    # --- Query Input and Response ---
+
+    st.markdown("---")
+    st.subheader("Ask a question about our documents:")
+
+    # Prompt templates
+    GENERAL_QA_PROMPT = """
+    You are an AI assistant for our internal knowledge base.
+    Your goal is to provide accurate and concise answers based ONLY on the provided context.
+    Do not make up information. If the answer is not found in the context, state that clearly.
+    Ensure your answers are directly supported by the text.
+    Accuracy is paramount.
+
+    Context:
+    {context}
+
+    Question: {question}
+
+    Answer:
+    """
+
+    ORDER_STATUS_PROMPT = """
+    You are an AI assistant helping with customer order inquiries.
+    Based ONLY on the following retrieved information from our order system and policies:
+    {context}
+
+    The customer's query is: {question}
+
+    Please perform the following steps:
+    1. Carefully analyze the context for any order details (Order ID, Customer Name, Status, Items, Dates, etc.).
+    2. If an order matching the query (or related to a name in the query) is found in the context:
+       - Address the customer by their name if available in the order details (e.g., "Hello [Customer Name],").
+       - Provide ALL available information about their order, including Order ID, status, items, dates, and any other relevant details found in the context.
+       - Be comprehensive and clear.
+    3. If no specific order details are found in the context that match the query, or if the context is insufficient, politely state that you couldn't find the specific order information in the provided documents and suggest they contact support for further assistance.
+    4. Do NOT invent or infer any information not explicitly present in the context.
+
+    Answer:
+    """
+
+    if "messages" not in st.session_state:
+        st.session_state.messages = []
+
+    query = st.text_input("Enter your question:", key="query_input", placeholder="e.g., 'What is the return policy?' or 'Status of order for John Doe?'")
+
+    if st.button("Submit", key="submit_button"):
+        if query:
+            st.session_state.messages.append({"role": "user", "content": query})
+
+            current_model_info = st.sidebar.empty() # Placeholder for current mode info
+
+            if "order" in query.lower() and ("status" in query.lower() or "track" in query.lower() or "update" in query.lower() or any(name_part.lower() in query.lower() for name_part in ["customer", "client", "name"])):
+                active_prompt_template = ORDER_STATUS_PROMPT
+                current_model_info.info("Mode: Order Status Query")
+            else:
+                active_prompt_template = GENERAL_QA_PROMPT
+                current_model_info.info("Mode: General Query")
+
+            rag_chain = get_rag_chain(llm, retriever, active_prompt_template)
+
+            with st.spinner("Thinking..."):
+                try:
+                    response = rag_chain.invoke(query)
+                    st.session_state.messages.append({"role": "assistant", "content": response})
+                except Exception as e:
+                    st.error(f"Error during RAG chain invocation: {e}")
+                    response = "Sorry, I encountered an error while processing your request."
+                    st.session_state.messages.append({"role": "assistant", "content": response})
+        else:
+            st.warning("Please enter a question.")
+
+    st.markdown("---")
+    st.subheader("Response:")
+    response_area = st.container()
+    # Ensure response_area is robust against empty messages or incorrect last role
+    last_assistant_message = "Ask a question to see the answer here."
+    if st.session_state.messages and st.session_state.messages[-1]['role'] == 'assistant':
+        last_assistant_message = st.session_state.messages[-1]['content']
+
+    response_area.markdown(f"<div class='response-area'>{last_assistant_message}</div>", unsafe_allow_html=True)
 
     st.sidebar.markdown("---")
     st.sidebar.markdown("Built with ❤️ using Streamlit & Langchain & Groq")
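Two closing notes on the new `main()` body. First, `get_rag_chain` itself is not part of this diff, but the `RunnablePassthrough`/`StrOutputParser` imports and the `rag_chain.invoke(query)` call suggest an LCEL pipeline along these lines — a sketch, not the author's confirmed implementation, using this file's own import paths:

```python
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough

def get_rag_chain_sketch(llm, retriever, prompt_template: str):
    prompt = ChatPromptTemplate.from_template(prompt_template)

    def format_docs(docs):
        # Concatenate the k retrieved chunks into one context string
        return "\n\n".join(d.page_content for d in docs)

    # The retriever fills {context}; the raw question passes through to {question}
    return (
        {"context": retriever | format_docs, "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )
```

With that shape, `rag_chain.invoke(query)` returns a plain string, which is what the new code appends to `st.session_state.messages`.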