masadonline committed · Commit 3f46408 · verified · Parent(s): 6ed44f6

Update app.py

Files changed (1): app.py +300 -248

app.py CHANGED
@@ -1,298 +1,350 @@
import streamlit as st
import os
from dotenv import load_dotenv
-from langchain_community.document_loaders import DirectoryLoader, PyPDFLoader, UnstructuredFileLoader
-from langchain_community.vectorstores import FAISS
-from langchain_huggingface import HuggingFaceEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
-from langchain.chains import RetrievalQA
from langchain_groq import ChatGroq
-import time
-import glob

# --- Configuration ---
DOCS_DIR = "docs"
-CHUNK_SIZE = 1500
-CHUNK_OVERLAP = 200
-EMBEDDING_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"  # Good default, consider others for specific needs
-CACHE_DIR_FAISS = "faiss_index_cache"  # Directory to cache FAISS index

-# --- Helper Functions ---

-def get_api_key():
-    """Loads GROQ API key from .env file or environment variables."""
-    load_dotenv()
-    groq_api_key = os.getenv("GROQ_API_KEY")
-    if not groq_api_key:
-        st.error("GROQ_API_KEY not found. Please set it in your environment variables or a .env file.")
-        st.stop()
-    return groq_api_key

-@st.cache_resource(show_spinner="Loading and processing documents...")
-def load_and_process_documents(_docs_dir: str):
    """
-    Loads documents from the specified directory, processes them,
-    creates embeddings, and stores them in a FAISS vector store.
-    Caches the FAISS index to disk for faster subsequent loads.
    """
-    if not os.path.exists(_docs_dir) or not os.listdir(_docs_dir):
-        st.warning(f"The '{_docs_dir}' directory is empty or does not exist. Please add your documents.")
-        return None

-    st.write(f"Scanning for documents in '{_docs_dir}'...")

-    # Using UnstructuredFileLoader for broader file type support including tables
-    # We'll use glob to find all files and pass them to UnstructuredFileLoader
-    all_files = []
-    supported_extensions = ["*.pdf", "*.docx", "*.doc", "*.xlsx", "*.xls", "*.json", "*.txt", "*.md", "*.html", "*.csv", "*.pptx"]  # Add more if needed
-    for ext in supported_extensions:
-        all_files.extend(glob.glob(os.path.join(_docs_dir, ext)))
-
-    if not all_files:
-        st.warning(f"No supported documents found in '{_docs_dir}'. Supported types: {', '.join(supported_extensions)}")
-        return None
-
-    st.write(f"Found {len(all_files)} files to process: {', '.join([os.path.basename(f) for f in all_files])}")
-
-    docs = []
-    progress_bar = st.progress(0, text="Loading documents...")
-    for i, file_path in enumerate(all_files):
        try:
-            st.write(f"Processing: {os.path.basename(file_path)}")
-            # UnstructuredFileLoader is good for various formats and attempts to handle tables.
-            # For complex tables in PDFs, more specialized parsers might be needed if Unstructured is insufficient.
-            # Common arguments for UnstructuredFileLoader for better table extraction:
-            #   strategy="hi_res" (for PDFs with complex layouts, may require `detectron2` installation)
-            #   mode="elements" or "paged"
-            #   pdf_infer_table_structure=True (if using unstructured[pdf])
-            loader = UnstructuredFileLoader(file_path, mode="elements", strategy="fast")  # Start with "fast", try "hi_res" if table extraction is poor
-            loaded_docs = loader.load()
-            docs.extend(loaded_docs)
        except Exception as e:
-            st.error(f"Error loading file {os.path.basename(file_path)}: {e}")
-        progress_bar.progress((i + 1) / len(all_files), text=f"Loaded {os.path.basename(file_path)}")
-
-    if not docs:
-        st.warning("No documents were successfully loaded or processed.")
-        return None
-
-    progress_bar.progress(1.0, text="Documents loaded. Splitting into chunks...")
-    time.sleep(0.5)  # For UX
-
-    text_splitter = RecursiveCharacterTextSplitter(chunk_size=CHUNK_SIZE, chunk_overlap=CHUNK_OVERLAP)
-    texts = text_splitter.split_documents(docs)
-
-    if not texts:
-        st.warning("Document splitting resulted in no text chunks. Check document content and splitter settings.")
        return None
-
-    st.write(f"Split documents into {len(texts)} chunks.")
-    progress_bar.progress(0, text="Generating embeddings and creating vector store... (This may take a while)")
-
-    # Initialize embeddings
    try:
-        embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME)
-    except Exception as e:
-        st.error(f"Failed to load embedding model '{EMBEDDING_MODEL_NAME}': {e}")
-        st.error("Please ensure you have an internet connection and the model name is correct.")
-        st.stop()
-
-
-    # Create FAISS vector store and cache it
-    if os.path.exists(CACHE_DIR_FAISS) and os.listdir(CACHE_DIR_FAISS):
-        try:
-            st.write(f"Loading cached FAISS index from {CACHE_DIR_FAISS}...")
-            vector_store = FAISS.load_local(CACHE_DIR_FAISS, embeddings, allow_dangerous_deserialization=True)  # Required for FAISS with HuggingFaceEmbeddings
-            st.write("FAISS index loaded from cache.")
-            progress_bar.progress(1.0, text="Vector store ready.")
-            return vector_store
-        except Exception as e:
-            st.warning(f"Could not load FAISS index from cache: {e}. Rebuilding index.")
-
-    try:
-        vector_store = FAISS.from_documents(texts, embeddings)
-        if not os.path.exists(CACHE_DIR_FAISS):
-            os.makedirs(CACHE_DIR_FAISS)
-        vector_store.save_local(CACHE_DIR_FAISS)
-        st.write(f"FAISS index created and saved to {CACHE_DIR_FAISS}.")
-        progress_bar.progress(1.0, text="Vector store ready.")
        return vector_store
    except Exception as e:
-        st.error(f"Error creating FAISS vector store: {e}")
        return None

-
@st.cache_resource(show_spinner="Initializing LLM...")
-def get_llm(_api_key: str):
-    """Initializes and returns the ChatGroq LLM."""
    try:
-        llm = ChatGroq(
-            groq_api_key=_api_key,
-            model_name="mixtral-8x7b-32768",  # Or "llama3-70b-8192", "llama3-8b-8192", "gemma-7b-it"
-            temperature=0.2,  # Adjust for creativity vs. factuality
-            # max_tokens=1024,  # Optional: set max tokens
-        )
        return llm
    except Exception as e:
-        st.error(f"Error initializing GROQ LLM: {e}")
-        st.stop()

-# --- Streamlit App UI ---

-st.set_page_config(page_title="SmartQuery RAG", layout="wide", initial_sidebar_state="expanded")

-# --- Styling (Optional - for a "catchy" look) ---
-st.markdown("""
<style>
-.stApp {
-    background-color: #f0f2f6; /* Light grey background */
}
-.stTextInput > div > div > input {
-    background-color: #ffffff;
    border-radius: 10px;
}
.stButton > button {
-    border-radius: 10px;
-    background-color: #1E88E5; /* Blue */
    color: white;
-    font-weight: bold;
    transition: background-color 0.3s ease;
}
.stButton > button:hover {
-    background-color: #1565C0; /* Darker Blue */
-}
-.stSpinner > div > svg { /* Spinner color */
-    fill: #1E88E5;
}
-.loader-text {
-    font-size: 1.2em;
-    color: #333;
}
-.ready-text {
-    font-size: 1.2em;
-    color: green;
-    font-weight: bold;
-}
-.response-container {
-    background-color: #ffffff;
-    padding: 20px;
-    border-radius: 10px;
-    box-shadow: 0 4px 8px rgba(0,0,0,0.1);
-    margin-top: 20px;
-}
-.response-header {
-    font-size: 1.5em;
-    color: #1E88E5;
-    margin-bottom: 10px;
}
</style>
-""", unsafe_allow_html=True)


-# --- Main Application Logic ---
-st.title("📄 SmartQuery RAG Assistant")
-st.markdown("Ask questions about your documents (Customer Orders, Company Policies, Financial Data, Products, Return Policies, etc.)")

-# Load API Key
-groq_api_key = get_api_key()

-# Sidebar for status and controls
-st.sidebar.header("Knowledge Base Status")
-status_placeholder = st.sidebar.empty()
-status_placeholder.markdown("<p class='loader-text'>Knowledge Base is loading...</p>", unsafe_allow_html=True)

-# Create docs directory if it doesn't exist
-if not os.path.exists(DOCS_DIR):
-    os.makedirs(DOCS_DIR)
-    st.sidebar.info(f"'{DOCS_DIR}' directory created. Please add your documents there and refresh.")

-# Load and process documents, then initialize RAG
-vector_store = load_and_process_documents(DOCS_DIR)

-if vector_store:
-    llm = get_llm(groq_api_key)
-    retriever = vector_store.as_retriever(
-        search_type="similarity",  # "mmr" (Maximal Marginal Relevance) is another option
-        search_kwargs={"k": 5}  # Retrieve top 5 relevant chunks
-    )
-    qa_chain = RetrievalQA.from_chain_type(
-        llm=llm,
-        chain_type="stuff",  # Options: "stuff", "map_reduce", "refine", "map_rerank"
-        retriever=retriever,
-        return_source_documents=True  # Set to True to see which documents were retrieved
-    )
-    status_placeholder.markdown("<p class='ready-text'>✅ Application is Ready. Ask your questions!</p>", unsafe_allow_html=True)
-    st.sidebar.success("Knowledge Base Loaded Successfully!")
-    if st.sidebar.button("🔄 Clear Cache & Reload Documents"):
-        # Clear specific caches or the entire cache
-        st.cache_resource.clear()  # Clears all @st.cache_resource
-        # Could also selectively clear st.cache_data if used.
-        # Manually delete FAISS cache directory
-        if os.path.exists(CACHE_DIR_FAISS):
-            import shutil
-            shutil.rmtree(CACHE_DIR_FAISS)
-            st.sidebar.info(f"Cache '{CACHE_DIR_FAISS}' cleared.")
-        st.rerun()
-
-else:
-    status_placeholder.error("⚠️ Knowledge Base could not be loaded. Check messages above and ensure documents are in the 'docs' folder.")
-    st.stop()
-
-
-# --- User Interaction ---
-st.markdown("---")
-query = st.text_input("Enter your question:", placeholder="e.g., What is the return policy for electronics?")
-
-if st.button("Submit Query", type="primary"):
-    if not query:
-        st.warning("Please enter a question.")
-    else:
-        with st.spinner("🧠 Thinking... Fetching answer..."):
-            try:
-                start_time = time.time()
-                response = qa_chain.invoke({"query": query})
-                end_time = time.time()
-
-                st.markdown("<div class='response-container'>", unsafe_allow_html=True)
-                st.markdown("<p class='response-header'>💡 Answer:</p>", unsafe_allow_html=True)
-                st.write(response["result"])
-                st.markdown("</div>", unsafe_allow_html=True)
-
-                st.info(f"Response generated in {end_time - start_time:.2f} seconds.")
-
-                with st.expander("📚 Show Retrieved Sources"):
-                    if "source_documents" in response and response["source_documents"]:
-                        for i, doc in enumerate(response["source_documents"]):
-                            st.markdown(f"**Source {i+1} (from: {doc.metadata.get('source', 'N/A').split('/')[-1]})**")
-                            st.caption(doc.page_content[:500] + "..." if doc.page_content else "N/A")  # Display first 500 chars
-                            st.markdown("---")
-                    else:
-                        st.write("No specific source documents were identified for this query.")
-
-            except Exception as e:
-                st.error(f"An error occurred while processing your query: {e}")
-
-# --- Suggestions for Improvement (as per prompt request) ---
-st.sidebar.markdown("---")
-st.sidebar.subheader("💡 Suggestions & Notes:")
-st.sidebar.markdown("""
-- **Table Data:** `UnstructuredFileLoader` attempts to parse tables. For PDFs with very complex tables, if accuracy is insufficient, consider:
-    - Pre-processing PDFs with tools like `Camelot` or `Tabula-py` to extract tables into CSV/Markdown, then load those.
-    - Exploring `unstructured` with `strategy="hi_res"` (may require `detectron2` and `brew install poppler` or similar for your OS). This is more computationally intensive.
-    - Fine-tuning embedding models or using models specialized for tabular data if table queries are critical.
-- **Accuracy:** "100% accuracy" is an ideal. RAG systems are powerful but can make mistakes. Improve by:
-    - Better chunking strategies.
-    - More advanced retrieval (e.g., HyDE, re-ranking).
-    - Prompt engineering for the QA chain.
-    - Using more powerful (and potentially slower/costlier) LLMs if available via GROQ.
-    - Regularly evaluating and curating the document set.
-- **Performance:** The current FAISS caching helps significantly. For very large datasets, explore more scalable vector DBs.
-- **UI/UX:** Added some basic styling. For more "catchy" UI, explore Streamlit Components or more elaborate CSS.
-- **Error Handling:** Added basic error checks. Robust applications need more comprehensive error management.
-- **Scalability:** For many concurrent users on Hugging Face, resource limits (CPU, RAM) for the free tier might be a bottleneck, especially during embedding.
-- **Embedding Model:** `all-MiniLM-L6-v2` is efficient. For higher accuracy with more complex content, consider models like `sentence-transformers/all-mpnet-base-v2` or domain-specific embeddings.
-- **Deployment:** Ensure `GROQ_API_KEY` is set as a secret in Hugging Face Spaces.
-""")
-
-st.markdown("---")
-st.markdown("<p style='text-align: center; color: grey;'>Powered by Streamlit, Langchain & GROQ</p>", unsafe_allow_html=True)

import streamlit as st
import os
+import glob
from dotenv import load_dotenv
+import time
+
+from langchain_community.document_loaders import (
+    PyPDFLoader,
+    Docx2txtLoader,
+    UnstructuredExcelLoader,
+    JSONLoader,
+    UnstructuredFileLoader  # Generic loader, good for tables
+)
from langchain_text_splitters import RecursiveCharacterTextSplitter
+from langchain.embeddings import HuggingFaceEmbeddings
+from langchain_community.vectorstores import FAISS
from langchain_groq import ChatGroq
+from langchain.chains import RetrievalQA
+from langchain.prompts import PromptTemplate
+from langchain.schema.runnable import RunnablePassthrough
+from langchain.schema.output_parser import StrOutputParser

# --- Configuration ---
DOCS_DIR = "docs"
+# Using a local sentence transformer model for embeddings
+EMBEDDING_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
+CACHE_DIR = ".streamlit_cache"  # For potential disk-based caching if needed beyond Streamlit's default

+# Create docs and cache directory if they don't exist
+if not os.path.exists(DOCS_DIR):
+    os.makedirs(DOCS_DIR)
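+# Note: CACHE_DIR is created below but not referenced again in this version; it appears reserved for optional disk-based caching.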
+if not os.path.exists(CACHE_DIR):
+    os.makedirs(CACHE_DIR)
+
+# --- Helper Function for Document Loading ---
+def get_loader(file_path):
+    """Detects file type and returns appropriate Langchain loader."""
+    _, ext = os.path.splitext(file_path)
+    ext = ext.lower()
+    # Prioritize UnstructuredFileLoader for robust table and content extraction
+    # UnstructuredFileLoader can handle many types, but we can specify if needed
+    if ext in ['.pdf', '.docx', '.doc', '.xlsx', '.xls', '.json', '.txt', '.md', '.html', '.xml', '.eml', '.msg']:
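+        # If "fast" parsing misses tables, strategy="hi_res" is an option for complex PDF layouts
+        # (more computationally intensive; may require extra dependencies such as detectron2).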
+        return UnstructuredFileLoader(file_path, mode="elements", strategy="fast")  # "elements" is good for tables
+    # Fallback or specific loaders if UnstructuredFileLoader has issues with a particular file
+    # elif ext == ".pdf":
+    #     return PyPDFLoader(file_path)  # Basic PDF loader
+    # elif ext in [".docx", ".doc"]:
+    #     return Docx2txtLoader(file_path)  # Basic DOCX loader
+    # elif ext in [".xlsx", ".xls"]:
+    #     return UnstructuredExcelLoader(file_path, mode="elements")  # Unstructured for Excel
+    # elif ext == ".json":
+    #     return JSONLoader(file_path, jq_schema='.[]', text_content=False)  # Adjust jq_schema as needed
+    else:
+        st.warning(f"Unsupported file type: {ext}. Skipping {os.path.basename(file_path)}")
+        return None

+# --- Caching Functions ---

+@st.cache_resource(show_spinner="Loading and Processing Documents...")
+def load_and_process_documents(docs_path: str):
    """
+    Loads documents from the specified path, processes them, and splits into chunks.
+    Uses UnstructuredFileLoader for potentially better table extraction.
    """
+    documents = []
+    doc_files = []
+    for ext in ["*.pdf", "*.docx", "*.xlsx", "*.json", "*.txt", "*.md"]:
+        doc_files.extend(glob.glob(os.path.join(docs_path, ext)))

+    if not doc_files:
+        st.error(f"No documents found in the '{docs_path}' directory. Please add some documents.")
+        st.info("Supported formats: .pdf, .docx, .xlsx, .json, .txt, .md")
+        return []

+    for file_path in doc_files:
        try:
+            st.write(f"Processing: {os.path.basename(file_path)}...")  # Show progress
+            loader = get_loader(file_path)
+            if loader:
+                loaded_docs = loader.load()
+                # Add source metadata to each document for better traceability
+                for doc in loaded_docs:
+                    doc.metadata["source"] = os.path.basename(file_path)
+                documents.extend(loaded_docs)
        except Exception as e:
+            st.error(f"Error loading {os.path.basename(file_path)}: {e}")
+            st.warning(f"Skipping file {os.path.basename(file_path)} due to error.")
+
+    if not documents:
+        st.error("No documents were successfully loaded or processed.")
+        return []
+
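+    # Chunking parameters below are hardcoded; larger chunks may keep table fragments together at some cost to retrieval precision.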
+    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
+    chunked_documents = text_splitter.split_documents(documents)
+
+    if not chunked_documents:
+        st.error("Document processing resulted in no text chunks. Check document content and parsing.")
+        return []
+
+    st.success(f"Successfully loaded and processed {len(doc_files)} documents into {len(chunked_documents)} chunks.")
+    return chunked_documents
+
+@st.cache_resource(show_spinner="Creating Vector Store (Embeddings)...")
+def create_vector_store(_documents, _embedding_model_name: str):
+    """Creates a FAISS vector store from the given documents and embedding model."""
+    if not _documents:
+        st.warning("Cannot create vector store: No documents processed.")
        return None
    try:
+        embeddings = HuggingFaceEmbeddings(model_name=_embedding_model_name)
+        vector_store = FAISS.from_documents(_documents, embedding=embeddings)
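+        # The index is held only in Streamlit's in-memory cache; vector_store.save_local(...) and
+        # FAISS.load_local(...) could persist it across restarts if needed.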
+        st.success("Vector Store created successfully!")
        return vector_store
    except Exception as e:
+        st.error(f"Error creating vector store: {e}")
        return None

@st.cache_resource(show_spinner="Initializing LLM...")
+def get_llm(api_key: str, model_name: str = "mixtral-8x7b-32768"):  # "llama3-70b-8192" is another option
+    """Initializes the Groq LLM."""
+    if not api_key:
+        st.error("GROQ_API_KEY not found! Please set it in your environment variables or a .env file.")
+        return None
    try:
+        llm = ChatGroq(temperature=0, groq_api_key=api_key, model_name=model_name)
        return llm
    except Exception as e:
+        st.error(f"Error initializing Groq LLM: {e}")
+        return None

+# --- RAG Chain Setup ---
+def get_rag_chain(llm, retriever, prompt_template_str):
+    """Creates a RAG chain with the given LLM, retriever, and prompt template."""
+    prompt = PromptTemplate(
+        template=prompt_template_str,
+        input_variables=["context", "question"]
+    )
+
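+    # LCEL pipeline: the dict step runs the retriever on the incoming question and passes the
+    # question itself through unchanged; the retrieved documents are stringified into the prompt's
+    # {context} slot (a formatting step joining page_content could make that cleaner).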
+    rag_chain = (
+        {"context": retriever, "question": RunnablePassthrough()}
+        | prompt
+        | llm
+        | StrOutputParser()
+    )
+    return rag_chain

+# --- Main Application Logic ---
+def main():
+    load_dotenv()
+    groq_api_key = os.getenv("GROQ_API_KEY")

+    # --- UI Setup ---
+    st.set_page_config(page_title="Internal Knowledge Base AI", layout="wide", initial_sidebar_state="expanded")
+
+    # Custom CSS for a "catchy and elegant" design
+    st.markdown("""
<style>
+/* General body style */
+body {
+    font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
+    background-color: #f0f2f6; /* Light gray background */
}
+/* Main content area */
+.main .block-container {
+    padding-top: 2rem;
+    padding-bottom: 2rem;
+    padding-left: 3rem;
+    padding-right: 3rem;
+    background-color: #ffffff; /* White content background */
    border-radius: 10px;
+    box-shadow: 0 4px 12px rgba(0,0,0,0.1); /* Subtle shadow */
+}
+/* Title style */
+h1 {
+    color: #1E88E5; /* Catchy blue */
+    text-align: center;
+    font-weight: 600;
+}
+/* Sidebar style */
+.stSidebar {
+    background-color: #E3F2FD; /* Light blue sidebar */
+    padding: 10px;
+}
+.stSidebar .sidebar-content {
+    background-color: #E3F2FD;
+}
+/* Input box style */
+.stTextInput > div > div > input {
+    background-color: #f8f9fa;
+    border-radius: 5px;
+    border: 1px solid #ced4da;
}
+/* Button style */
.stButton > button {
+    background-color: #1E88E5; /* Catchy blue */
    color: white;
+    border-radius: 5px;
+    padding: 0.5rem 1rem;
+    font-weight: 500;
+    border: none;
    transition: background-color 0.3s ease;
}
.stButton > button:hover {
+    background-color: #1565C0; /* Darker blue on hover */
}
+/* Status messages */
+.stAlert { /* For st.info, st.success, st.warning, st.error */
+    border-radius: 5px;
}
+/* Response area */
+.response-area {
+    background-color: #f8f9fa;
+    padding: 1rem;
+    border-radius: 5px;
+    border: 1px solid #e0e0e0;
+    margin-top: 1rem;
+    min-height: 100px;
}
</style>
+    """, unsafe_allow_html=True)

+    st.title("📚 Internal Knowledge Base AI 💡")
+
+    # Sidebar for status and information
+    st.sidebar.header("System Status")
+    status_placeholder = st.sidebar.empty()
+    status_placeholder.info("Initializing...")

+    if not groq_api_key:
+        status_placeholder.error("GROQ API Key not configured. Application cannot start.")
+        st.stop()

+    # --- Knowledge Base Loading ---
+    # This will be cached after the first run
+    with st.spinner("Knowledge Base is loading... Please wait."):
+        start_time = time.time()
+        processed_documents = load_and_process_documents(DOCS_DIR)
+        if not processed_documents:
+            status_placeholder.error("Failed to load or process documents. Check logs and `docs` folder.")
+            st.stop()
+
+        vector_store = create_vector_store(processed_documents, EMBEDDING_MODEL_NAME)
+        if not vector_store:
+            status_placeholder.error("Failed to create vector store. Application cannot proceed.")
+            st.stop()
+
+        llm = get_llm(groq_api_key)
+        if not llm:
+            status_placeholder.error("Failed to initialize LLM. Application cannot proceed.")
+            st.stop()
+
+        end_time = time.time()
+        status_placeholder.success(f"Application Ready! (Loaded in {end_time - start_time:.2f}s)")
+
+    retriever = vector_store.as_retriever(search_kwargs={"k": 5})  # Retrieve top 5 relevant chunks
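+    # search_type="mmr" (maximal marginal relevance) is an alternative to the default similarity search for more diverse results.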
+
+    # --- Query Input and Response ---
+    st.markdown("---")
+    st.subheader("Ask a question about our documents:")
+
+    # Prompt templates
+    GENERAL_QA_PROMPT = """
+You are an AI assistant for our internal knowledge base.
+Your goal is to provide accurate and concise answers based ONLY on the provided context.
+Do not make up information. If the answer is not found in the context, state that clearly.
+Ensure your answers are directly supported by the text.
+Accuracy is paramount.
+
+Context:
+{context}
+
+Question: {question}
+
+Answer:
+"""

+    ORDER_STATUS_PROMPT = """
+You are an AI assistant helping with customer order inquiries.
+Based ONLY on the following retrieved information from our order system and policies:
+{context}

+The customer's query is: {question}

+Please perform the following steps:
+1. Carefully analyze the context for any order details (Order ID, Customer Name, Status, Items, Dates, etc.).
+2. If an order matching the query (or related to a name in the query) is found in the context:
+   - Address the customer by their name if available in the order details (e.g., "Hello [Customer Name],").
+   - Provide ALL available information about their order, including Order ID, status, items, dates, and any other relevant details found in the context.
+   - Be comprehensive and clear.
+3. If no specific order details are found in the context that match the query, or if the context is insufficient, politely state that you couldn't find the specific order information in the provided documents and suggest they contact support for further assistance.
+4. Do NOT invent or infer any information not explicitly present in the context.

+Answer:
+"""
+
+    # Use session state to store conversation history if desired, or just last query/response
+    if "messages" not in st.session_state:
+        st.session_state.messages = []
+
+    query = st.text_input("Enter your question:", key="query_input", placeholder="e.g., 'What is the return policy?' or 'Status of order for John Doe?'")
+
+    if st.button("Submit", key="submit_button"):
+        if query:
+            st.session_state.messages.append({"role": "user", "content": query})
+
+            # Determine prompt based on query type (simple keyword check)
+            # A more sophisticated intent detection could be used here (e.g., another LLM call, classifier)
+            if "order" in query.lower() and ("status" in query.lower() or "track" in query.lower() or "update" in query.lower() or any(name_part.lower() in query.lower() for name_part in ["customer", "client", "name"])):  # Basic check for order status
+                active_prompt_template = ORDER_STATUS_PROMPT
+                st.sidebar.info("Using: Order Status Query Mode")
+            else:
+                active_prompt_template = GENERAL_QA_PROMPT
+                st.sidebar.info("Using: General Query Mode")
+
+            rag_chain = get_rag_chain(llm, retriever, active_prompt_template)
+
+            with st.spinner("Thinking..."):
+                try:
+                    response = rag_chain.invoke(query)
+                    st.session_state.messages.append({"role": "assistant", "content": response})
+                except Exception as e:
+                    st.error(f"Error during RAG chain invocation: {e}")
+                    response = "Sorry, I encountered an error while processing your request."
+                    st.session_state.messages.append({"role": "assistant", "content": response})
+        else:
+            st.warning("Please enter a question.")
+
+    # Display chat messages
+    st.markdown("---")
+    st.subheader("Response:")
+    response_area = st.container()
+    with response_area:  # st.container() has no add_rows() method; render the markdown inside the container instead
+        st.markdown(f"<div class='response-area'>{st.session_state.messages[-1]['content'] if st.session_state.messages and st.session_state.messages[-1]['role'] == 'assistant' else 'Ask a question to see the answer here.'}</div>", unsafe_allow_html=True)
+
+
+    # Optional: Display retrieved context for debugging or transparency
+    # if st.sidebar.checkbox("Show Retrieved Context (for debugging)"):
+    #     if query and vector_store:  # Check if query and vector_store exist
+    #         docs = retriever.get_relevant_documents(query)
+    #         st.sidebar.subheader("Retrieved Context:")
+    #         for i, doc in enumerate(docs):
+    #             st.sidebar.text_area(f"Chunk {i+1} (Source: {doc.metadata.get('source', 'N/A')})", doc.page_content, height=150)
+
+    st.sidebar.markdown("---")
+    st.sidebar.markdown("Built with ❤️ using Streamlit & Langchain & Groq")
+
+
+if __name__ == "__main__":
+    main()