masadonline commited on
Commit
fedd7f3
·
verified ยท
1 Parent(s): 09c3552

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +512 -273
app.py CHANGED
@@ -1,299 +1,538 @@
 
1
  import streamlit as st
2
  import os
3
- import glob
4
- from dotenv import load_dotenv
5
  import time
6
-
7
- from langchain_community.document_loaders import (
8
- PyPDFLoader,
9
- Docx2txtLoader,
10
- UnstructuredExcelLoader,
11
- JSONLoader,
12
- UnstructuredFileLoader # Generic loader, good for tables
13
- )
14
- from langchain_text_splitters import RecursiveCharacterTextSplitter
15
- from langchain.embeddings import HuggingFaceEmbeddings
16
- from langchain_community.vectorstores import FAISS
17
- from langchain_groq import ChatGroq
18
- from langchain.chains import RetrievalQA
19
- from langchain.prompts import PromptTemplate
20
- from langchain.schema.runnable import RunnablePassthrough
21
- from langchain.schema.output_parser import StrOutputParser
22
-
23
- # --- Configuration ---
24
- DOCS_DIR = "docs"
25
- # Using a local sentence transformer model for embeddings
26
- EMBEDDING_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
27
- CACHE_DIR = ".streamlit_cache" # For potential disk-based caching if needed beyond Streamlit's default
28
-
29
- # Create docs and cache directory if they don't exist
30
- if not os.path.exists(DOCS_DIR):
31
- os.makedirs(DOCS_DIR)
32
- if not os.path.exists(CACHE_DIR):
33
- os.makedirs(CACHE_DIR)
34
-
35
- # --- Helper Function for Document Loading ---
36
- def get_loader(file_path):
37
- """Detects file type and returns appropriate Langchain loader."""
38
- _, ext = os.path.splitext(file_path)
39
- ext = ext.lower()
40
- # Prioritize UnstructuredFileLoader for robust table and content extraction
41
- # UnstructuredFileLoader can handle many types, but we can specify if needed
42
- if ext in ['.pdf', '.docx', '.doc', '.xlsx', '.xls', '.json', '.txt', '.md', '.html', '.xml', '.eml', '.msg']:
43
- return UnstructuredFileLoader(file_path, mode="elements", strategy="fast") # "elements" is good for tables
44
- # Fallback or specific loaders if UnstructuredFileLoader has issues with a particular file
45
- # elif ext == ".pdf":
46
- # return PyPDFLoader(file_path) # Basic PDF loader
47
- # elif ext in [".docx", ".doc"]:
48
- # return Docx2txtLoader(file_path) # Basic DOCX loader
49
- # elif ext in [".xlsx", ".xls"]:
50
- # return UnstructuredExcelLoader(file_path, mode="elements") # Unstructured for Excel
51
- # elif ext == ".json":
52
- # return JSONLoader(file_path, jq_schema='.[]', text_content=False) # Adjust jq_schema as needed
53
- else:
54
- st.warning(f"Unsupported file type: {ext}. Skipping {os.path.basename(file_path)}")
 
 
 
 
55
  return None
56
 
57
- # --- Caching Functions ---
58
-
59
- @st.cache_resource(show_spinner="Loading and Processing Documents...")
60
- def load_and_process_documents(docs_path: str):
61
- """
62
- Loads documents from the specified path, processes them, and splits into chunks.
63
- Uses UnstructuredFileLoader for potentially better table extraction.
64
- """
65
- documents = []
66
- doc_files = []
67
- for ext in ["*.pdf", "*.docx", "*.xlsx", "*.json", "*.txt", "*.md"]:
68
- doc_files.extend(glob.glob(os.path.join(docs_path, ext)))
69
-
70
- if not doc_files:
71
- st.error(f"No documents found in the '{docs_path}' directory. Please add some documents.")
72
- st.info("Supported formats: .pdf, .docx, .xlsx, .json, .txt, .md")
73
  return []
74
 
75
- for file_path in doc_files:
76
- try:
77
- st.write(f"Processing: {os.path.basename(file_path)}...") # Show progress
78
- loader = get_loader(file_path)
79
- if loader:
80
- loaded_docs = loader.load()
81
- # Add source metadata to each document for better traceability
82
- for doc in loaded_docs:
83
- doc.metadata["source"] = os.path.basename(file_path)
84
- documents.extend(loaded_docs)
85
- except Exception as e:
86
- st.error(f"Error loading {os.path.basename(file_path)}: {e}")
87
- st.warning(f"Skipping file {os.path.basename(file_path)} due to error.")
88
-
89
- if not documents:
90
- st.error("No documents were successfully loaded or processed.")
91
  return []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
92
 
93
- text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
94
- chunked_documents = text_splitter.split_documents(documents)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
 
96
- if not chunked_documents:
97
- st.error("Document processing resulted in no text chunks. Check document content and parsing.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
  return []
99
 
100
- st.success(f"Successfully loaded and processed {len(doc_files)} documents into {len(chunked_documents)} chunks.")
101
- return chunked_documents
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
 
103
- @st.cache_resource(show_spinner="Creating Vector Store (Embeddings)...")
104
- def create_vector_store(_documents, _embedding_model_name: str):
105
- """Creates a FAISS vector store from the given documents and embedding model."""
106
- if not _documents:
107
- st.warning("Cannot create vector store: No documents processed.")
108
  return None
109
  try:
110
- embeddings = HuggingFaceEmbeddings(model_name=_embedding_model_name)
111
- vector_store = FAISS.from_documents(_documents, embedding=embeddings)
112
- st.success("Vector Store created successfully!")
113
- return vector_store
114
  except Exception as e:
115
- st.error(f"Error creating vector store: {e}")
116
  return None
117
 
118
- @st.cache_resource(show_spinner="Initializing LLM...")
119
- def get_llm(api_key: str, model_name: str = "llama3-8b-8192"): # UPDATED MODEL
120
- """Initializes the Groq LLM."""
121
- if not api_key:
122
- st.error("GROQ_API_KEY not found! Please set it in your environment variables or a .env file.")
123
  return None
124
  try:
125
- # Available models (check Groq documentation for the latest):
126
- # "llama3-8b-8192" (good balance of speed and capability)
127
- # "llama3-70b-8192" (more powerful, potentially slower)
128
- # "gemma-7b-it"
129
- llm = ChatGroq(temperature=0, groq_api_key=api_key, model_name=model_name)
130
- st.sidebar.info(f"LLM Initialized: {model_name}") # Add info about which model is used
131
- return llm
132
  except Exception as e:
133
- st.error(f"Error initializing Groq LLM: {e}")
134
  return None
135
 
136
- # --- RAG Chain Setup ---
137
- def get_rag_chain(llm, retriever, prompt_template):
138
- """Creates the Retrieval QA chain."""
139
- prompt = PromptTemplate.from_template(prompt_template)
140
- rag_chain = (
141
- {"context": retriever, "question": RunnablePassthrough()}
142
- | prompt
143
- | llm
144
- | StrOutputParser()
145
- )
146
- return rag_chain
147
-
148
- # --- Main Application Logic ---
149
- def main():
150
- load_dotenv()
151
- groq_api_key = os.getenv("GROQ_API_KEY")
152
-
153
- # --- UI Setup ---
154
- st.set_page_config(page_title="Internal Knowledge Base AI", layout="wide", initial_sidebar_state="expanded")
155
-
156
- # Custom CSS (remains the same)
157
- st.markdown("""
158
- <style>
159
- .reportview-container .main .block-container{
160
- padding-top: 2rem;
161
- padding-bottom: 2rem;
162
- }
163
- .st-emotion-cache-z5fcl4 {
164
- padding-top: 1rem;
165
- }
166
- .response-area {
167
- background-color: #f0f2f6;
168
- padding: 15px;
169
- border-radius: 5px;
170
- margin-top: 10px;
171
- }
172
- </style>
173
- """, unsafe_allow_html=True)
174
-
175
- st.title("📚 Internal Knowledge Base AI")
176
-
177
- st.sidebar.header("System Status")
178
- status_placeholder = st.sidebar.empty()
179
- status_placeholder.info("Initializing...")
180
-
181
-
182
- if not groq_api_key:
183
- status_placeholder.error("GROQ API Key not configured. Application cannot start.")
184
- st.stop()
185
-
186
- # --- Knowledge Base Loading ---
187
- with st.spinner("Knowledge Base is loading... Please wait."):
188
- start_time = time.time()
189
- processed_documents = load_and_process_documents(DOCS_DIR)
190
- if not processed_documents:
191
- status_placeholder.error("Failed to load or process documents. Check logs and `docs` folder.")
192
- st.stop()
193
-
194
- vector_store = create_vector_store(processed_documents, EMBEDDING_MODEL_NAME)
195
- if not vector_store:
196
- status_placeholder.error("Failed to create vector store. Application cannot proceed.")
197
- st.stop()
198
-
199
- # Pass the selected model to get_llm
200
- llm = get_llm(groq_api_key, model_name="llama3-8b-8192") # Hardcoded to use llama3-8b-8192
201
- if not llm:
202
- # Error is already shown by get_llm, but update status_placeholder too
203
- status_placeholder.error("Failed to initialize LLM. Application cannot proceed.")
204
- st.stop()
205
-
206
- end_time = time.time()
207
- # status_placeholder is updated by get_llm or on success below
208
- status_placeholder.success(f"Application Ready! (Loaded in {end_time - start_time:.2f}s)")
209
-
210
- retriever = vector_store.as_retriever(search_kwargs={"k": 5})
211
-
212
- # --- Query Input and Response ---
213
-
214
- st.markdown("---")
215
- st.subheader("Ask a question about our documents:")
216
-
217
- # Prompt templates
218
- GENERAL_QA_PROMPT = """
219
- You are an AI assistant for our internal knowledge base.
220
- Your goal is to provide accurate and concise answers based ONLY on the provided context.
221
- Do not make up information. If the answer is not found in the context, state that clearly.
222
- Ensure your answers are directly supported by the text.
223
- Accuracy is paramount.
224
-
225
- Context:
226
- {context}
227
-
228
- Question: {question}
229
-
230
- Answer:
231
- """
232
-
233
- ORDER_STATUS_PROMPT = """
234
- You are an AI assistant helping with customer order inquiries.
235
- Based ONLY on the following retrieved information from our order system and policies:
236
- {context}
237
-
238
- The customer's query is: {question}
239
-
240
- Please perform the following steps:
241
- 1. Carefully analyze the context for any order details (Order ID, Customer Name, Status, Items, Dates, etc.).
242
- 2. If an order matching the query (or related to a name in the query) is found in the context:
243
- - Address the customer by their name if available in the order details (e.g., "Hello [Customer Name],").
244
- - Provide ALL available information about their order, including Order ID, status, items, dates, and any other relevant details found in the context.
245
- - Be comprehensive and clear.
246
- 3. If no specific order details are found in the context that match the query, or if the context is insufficient, politely state that you couldn't find the specific order information in the provided documents and suggest they contact support for further assistance.
247
- 4. Do NOT invent or infer any information not explicitly present in the context.
248
-
249
- Answer:
250
- """
251
-
252
- if "messages" not in st.session_state:
253
- st.session_state.messages = []
254
-
255
- query = st.text_input("Enter your question:", key="query_input", placeholder="e.g., 'What is the return policy?' or 'Status of order for John Doe?'")
256
-
257
- if st.button("Submit", key="submit_button"):
258
- if query:
259
- st.session_state.messages.append({"role": "user", "content": query})
260
-
261
- current_model_info = st.sidebar.empty() # Placeholder for current mode info
262
-
263
- if "order" in query.lower() and ("status" in query.lower() or "track" in query.lower() or "update" in query.lower() or any(name_part.lower() in query.lower() for name_part in ["customer", "client", "name"])):
264
- active_prompt_template = ORDER_STATUS_PROMPT
265
- current_model_info.info("Mode: Order Status Query")
266
- else:
267
- active_prompt_template = GENERAL_QA_PROMPT
268
- current_model_info.info("Mode: General Query")
269
-
270
- rag_chain = get_rag_chain(llm, retriever, active_prompt_template)
271
-
272
- with st.spinner("Thinking..."):
273
- try:
274
- response = rag_chain.invoke(query)
275
- st.session_state.messages.append({"role": "assistant", "content": response})
276
- except Exception as e:
277
- st.error(f"Error during RAG chain invocation: {e}")
278
- response = "Sorry, I encountered an error while processing your request."
279
- st.session_state.messages.append({"role": "assistant", "content": response})
280
- else:
281
- st.warning("Please enter a question.")
282
-
283
- st.markdown("---")
284
- st.subheader("Response:")
285
- response_area = st.container()
286
- # Ensure response_area is robust against empty messages or incorrect last role
287
- last_assistant_message = "Ask a question to see the answer here."
288
- if st.session_state.messages and st.session_state.messages[-1]['role'] == 'assistant':
289
- last_assistant_message = st.session_state.messages[-1]['content']
290
-
291
- response_area.markdown(f"<div class='response-area'>{last_assistant_message}</div>", unsafe_allow_html=True)
292
-
293
 
294
- st.sidebar.markdown("---")
295
- st.sidebar.markdown("Built with ❤️ using Streamlit & Langchain & Groq")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
296
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
297
 
298
- if __name__ == "__main__":
299
- main()
 
1
+ # app.py
2
  import streamlit as st
3
  import os
 
 
4
  import time
5
+ from datetime import datetime, timezone
6
+ import json
7
+ import PyPDF2
8
+ from sentence_transformers import SentenceTransformer
9
+ import faiss
10
+ import numpy as np
11
+ from twilio.rest import Client
12
+ from groq import Groq
13
+
14
# --- Page Configuration ---
st.set_page_config(page_title="RAG Customer Support Chatbot", layout="wide")

# --- Default Configurations & File Paths ---
# Empty-string fallbacks are only used when the matching st.secrets entry is absent;
# the sidebar then asks the user to enter the value manually.
DEFAULT_TWILIO_ACCOUNT_SID_FALLBACK = ""  # Fallback if secret "TWILIO_SID" is not found
DEFAULT_TWILIO_AUTH_TOKEN_FALLBACK = ""  # Fallback if secret "TWILIO_TOKEN" is not found
DEFAULT_GROQ_API_KEY_FALLBACK = ""  # Fallback if secret "GROQ_API_KEY" is not found

DEFAULT_TWILIO_CONVERSATION_SERVICE_SID = ""
DEFAULT_TWILIO_BOT_WHATSAPP_IDENTITY = "whatsapp:+14155238886"  # Twilio Sandbox default
DEFAULT_EMBEDDING_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
DEFAULT_POLLING_INTERVAL_S = 30  # seconds between Twilio polls (sidebar-adjustable)

# Knowledge-base input files; all are expected under docs/.
DOCS_FOLDER = "docs/"
CUSTOMER_ORDERS_FILE = os.path.join(DOCS_FOLDER, "CustomersOrder.json")
PRODUCTS_FILE = os.path.join(DOCS_FOLDER, "products.json")
POLICY_PDF_FILE = os.path.join(DOCS_FOLDER, "ProductReturnPolicy.pdf")
FAQ_PDF_FILE = os.path.join(DOCS_FOLDER, "FAQ.pdf")

# --- Application Secrets Configuration ---
# Primary credentials fetched from st.secrets (each is None when not configured).
APP_TWILIO_ACCOUNT_SID = st.secrets.get("TWILIO_SID")
APP_TWILIO_AUTH_TOKEN = st.secrets.get("TWILIO_TOKEN")
APP_GROQ_API_KEY = st.secrets.get("GROQ_API_KEY")

# Optional secrets; sidebar inputs fall back to the DEFAULT_* values when unset.
APP_TWILIO_CONVERSATION_SERVICE_SID_SECRET = st.secrets.get("TWILIO_CONVERSATION_SERVICE_SID")
APP_TWILIO_BOT_WHATSAPP_IDENTITY_SECRET = st.secrets.get("TWILIO_BOT_WHATSAPP_IDENTITY")
41
+
42
+
43
# --- RAG Processing Utilities ---
def load_json_data(file_path):
    """Read and parse a JSON file, surfacing failures in the Streamlit UI.

    Returns the parsed object on success, or None when the file is
    missing, malformed, or unreadable.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as handle:
            return json.load(handle)
    except FileNotFoundError:
        st.error(f"Error: JSON file not found at {file_path}")
    except json.JSONDecodeError:
        st.error(f"Error: Could not decode JSON from {file_path}")
    except Exception as e:
        st.error(f"An unexpected error occurred while loading {file_path}: {e}")
    return None
59
 
60
def load_pdf_data(file_path):
    """Extract text from a PDF file, returning one string per page.

    Pages with no extractable text contribute "" (extract_text() may
    return None). Returns [] when the file is missing or unreadable,
    after reporting the problem in the Streamlit UI.
    """
    try:
        with open(file_path, 'rb') as f:
            reader = PyPDF2.PdfReader(f)
            # Iterate the pages collection directly instead of indexing
            # via range(len(...)).
            return [page.extract_text() or "" for page in reader.pages]
    except FileNotFoundError:
        st.error(f"Error: PDF file not found at {file_path}")
        return []
    except Exception as e:
        st.error(f"An error occurred while processing PDF {file_path}: {e}")
        return []
76
 
77
def chunk_text(text_pages, chunk_size=1000, chunk_overlap=200):
    """Chunk text from PDF pages into smaller, overlapping pieces.

    Args:
        text_pages: list of per-page strings; joined with newlines first.
        chunk_size: maximum characters per chunk.
        chunk_overlap: characters shared between consecutive chunks.

    Returns:
        List of non-blank chunks (empty list for blank input).

    Raises:
        ValueError: if chunk_overlap >= chunk_size. The original stride
            (chunk_size - chunk_overlap) would then be <= 0 and the loop
            would never terminate.
    """
    if chunk_overlap >= chunk_size:
        raise ValueError("chunk_overlap must be smaller than chunk_size")
    full_text = "\n".join(text_pages)
    if not full_text.strip():
        return []
    step = chunk_size - chunk_overlap
    text_len = len(full_text)
    chunks = []
    start = 0
    while start < text_len:
        end = start + chunk_size
        chunks.append(full_text[start:end])
        if end >= text_len:
            # The last chunk already reached the end of the text; stepping
            # again would only emit a redundant trailing fragment.
            break
        start += step
    return [chunk for chunk in chunks if chunk.strip()]
93
+
94
@st.cache_resource(show_spinner="Initializing embedding model...")
def initialize_embedding_model(model_name=DEFAULT_EMBEDDING_MODEL_NAME):
    """Load and cache a SentenceTransformer embedding model.

    Returns the model instance, or None (after surfacing the error in the
    Streamlit UI) when loading fails.
    """
    try:
        return SentenceTransformer(model_name)
    except Exception as e:
        st.error(f"Error initializing embedding model '{model_name}': {e}")
        return None
103
 
104
@st.cache_resource(show_spinner="Building FAISS index for PDF documents...")
def create_faiss_index(_text_chunks, _embedding_model):
    """Create a FAISS index over the given text chunks.

    The leading underscores keep st.cache_resource from trying to hash the
    unhashable model/chunk arguments.

    Returns (index, indexed_chunks) — the chunks actually embedded, in
    index order — or (None, []) when indexing is impossible.
    """
    if not _text_chunks or _embedding_model is None:
        st.warning("Cannot create FAISS index: No text chunks or embedding model available.")
        return None, []
    try:
        # Keep only non-blank string chunks; their order defines the index ids.
        usable_chunks = [str(c) for c in _text_chunks if c and isinstance(c, str) and c.strip()]
        if not usable_chunks:
            st.warning("No valid text chunks to embed for FAISS index.")
            return None, []
        vectors = _embedding_model.encode(usable_chunks, convert_to_tensor=False)
        if vectors.ndim == 1:
            # A single chunk comes back as a flat vector; make it 2-D.
            vectors = vectors.reshape(1, -1)
        if vectors.shape[0] == 0:
            st.warning("No embeddings were generated for FAISS index.")
            return None, []
        # Flat L2 (Euclidean) index over the embedding dimension.
        index = faiss.IndexFlatL2(vectors.shape[1])
        index.add(np.array(vectors, dtype=np.float32))
        return index, usable_chunks
    except Exception as e:
        st.error(f"Error creating FAISS index: {e}")
        return None, []
128
 
129
def search_faiss_index(index, query_text, embedding_model, indexed_chunks, k=3):
    """Search the FAISS index and return up to k relevant chunk texts."""
    if index is None or embedding_model is None or not query_text:
        return []
    try:
        query_vec = embedding_model.encode([query_text], convert_to_tensor=False)
        if query_vec.ndim == 1:
            query_vec = query_vec.reshape(1, -1)
        _distances, neighbor_ids = index.search(np.array(query_vec, dtype=np.float32), k)
        # FAISS pads missing neighbours with -1; the bounds check drops them.
        return [
            indexed_chunks[i]
            for i in neighbor_ids[0]
            if 0 <= i < len(indexed_chunks)
        ]
    except Exception as e:
        st.error(f"Error searching FAISS index: {e}")
        return []
147
 
148
def get_order_details(order_id, customer_orders_data):
    """Look up a single order by its order_id.

    Returns the matching order as pretty-printed JSON, or an explanatory
    message when the data is unavailable or the ID is unknown.
    """
    if not customer_orders_data:
        return "Customer order data is not loaded."
    match = next(
        (order for order in customer_orders_data if order.get("order_id") == order_id),
        None,
    )
    if match is not None:
        return json.dumps(match, indent=2)
    return f"No order found with ID: {order_id}."
156
+
157
def get_product_info(query, products_data):
    """Find products matching a free-text query.

    A product matches when the query appears (case-insensitively) in its
    name or description, or equals its product_id.

    Returns the matches as pretty-printed JSON, or an explanatory message
    when nothing matches or the data is unavailable.
    """
    if not products_data:
        return "Product data is not loaded."
    query_lower = query.lower()
    found_products = []
    for product in products_data:
        # str(... or "") guards against missing, None, or non-string fields;
        # the original .get(key, "").lower() crashed on an explicit None value.
        name = str(product.get("name") or "").lower()
        description = str(product.get("description") or "").lower()
        product_id = str(product.get("product_id") or "").lower()
        if query_lower in name or query_lower in description or query_lower == product_id:
            found_products.append(product)
    if found_products:
        return json.dumps(found_products, indent=2)
    return f"No product information found matching your query: '{query}'."
171
+
172
# --- LLM Operations ---
@st.cache_data(show_spinner="Generating response with LLaMA3...")
def generate_response_groq(_groq_client, query, context, model="llama3-8b-8192"):
    """Generates a response using GROQ LLaMA3 API.

    The leading underscore on _groq_client excludes the unhashable client
    from st.cache_data's key, so responses are cached per
    (query, context, model) triple.

    Returns the assistant's reply text, or a human-readable error string
    when the client is missing, the query is empty, or the API call fails.
    """
    if not _groq_client:
        return "GROQ client not initialized. Please check API key."
    if not query:
        return "Query is empty."
    # Grounding prompt: the model must answer from the retrieved context only.
    prompt = f"""You are a helpful customer support assistant.
Use the following context to answer the user's question.
If the context doesn't contain the answer, state that you don't have enough information.
Do not make up information. Be concise and polite.

Context:
{context}

User Question: {query}

Assistant Answer:
"""
    try:
        chat_completion = _groq_client.chat.completions.create(
            messages=[
                {"role": "system", "content": "You are a helpful customer support assistant."},
                {"role": "user", "content": prompt}
            ],
            model=model, temperature=0.7, max_tokens=1024, top_p=1
        )
        response = chat_completion.choices[0].message.content
        return response
    except Exception as e:
        st.error(f"Error calling GROQ API: {e}")
        return "Sorry, I encountered an error while trying to generate a response."
205
 
206
def initialize_groq_client(api_key_val):
    """Initialize the GROQ client.

    Returns a Groq client instance, or None when the key is missing or
    construction fails (the problem is surfaced in the Streamlit UI).
    """
    if not api_key_val:
        st.warning("GROQ API Key is missing.")
        return None
    try:
        return Groq(api_key=api_key_val)
    except Exception as e:
        st.error(f"Failed to initialize GROQ client: {e}")
        return None
217
 
218
# --- Twilio Operations ---
def initialize_twilio_client(acc_sid, auth_tkn):
    """Initialize the Twilio client.

    Returns a Client instance, or None when either credential is missing
    or construction fails (the problem is surfaced in the Streamlit UI).
    """
    if not acc_sid or not auth_tkn:
        st.warning("Twilio Account SID or Auth Token is missing.")
        return None
    try:
        return Client(acc_sid, auth_tkn)
    except Exception as e:
        st.error(f"Failed to initialize Twilio client: {e}")
        return None
230
 
231
def get_new_whatsapp_messages(twilio_client, conversation_service_sid_val, bot_start_time_utc,
                              processed_message_sids, bot_whatsapp_identity_val):
    """Fetches new, unanswered WhatsApp messages from Twilio Conversations.

    Polls up to 50 conversations in the service; for each conversation
    updated since the bot started, scans its 10 most recent messages
    (newest first) and collects messages that:
      - have not already been handled (SID not in processed_message_sids),
      - were not authored by the bot itself, and
      - were created after bot_start_time_utc.

    Returns the collected messages as dicts, sorted oldest-first so replies
    go out in arrival order. Returns [] (with a UI warning) when the client
    or service SID is missing, or on any Twilio API error.
    """
    if not twilio_client:
        st.warning("Twilio client not initialized.")
        return []
    if not conversation_service_sid_val:
        st.warning("Twilio Conversation Service SID not provided.")
        return []

    new_messages_to_process = []
    try:
        conversations = twilio_client.conversations.v1 \
            .services(conversation_service_sid_val) \
            .conversations \
            .list(limit=50)

        for conv in conversations:
            # Only look at conversations touched since the bot came online.
            if conv.date_updated and conv.date_updated > bot_start_time_utc:
                messages = twilio_client.conversations.v1 \
                    .services(conversation_service_sid_val) \
                    .conversations(conv.sid) \
                    .messages \
                    .list(order='desc', limit=10)

                for msg in messages:
                    if msg.sid in processed_message_sids:
                        continue
                    if msg.author and msg.author.lower() != bot_whatsapp_identity_val.lower() and \
                       msg.date_created and msg.date_created > bot_start_time_utc:
                        new_messages_to_process.append({
                            "conversation_sid": conv.sid, "message_sid": msg.sid,
                            "author_identity": msg.author, "message_body": msg.body,
                            "timestamp_utc": msg.date_created
                        })
                        # NOTE(review): break appears to sit inside the if, so only
                        # the newest qualifying message per conversation is taken
                        # per poll — confirm against the original indentation.
                        break
    except Exception as e:
        st.error(f"Error fetching Twilio messages: {e}")
    return sorted(new_messages_to_process, key=lambda m: m['timestamp_utc'])
270
+
271
def send_whatsapp_message(twilio_client, conversation_service_sid_val, conversation_sid, message_body, bot_identity_val):
    """Send a message to a Twilio Conversation from the bot's identity.

    Returns True on success, False otherwise; all failures are reported
    in the Streamlit UI.
    """
    # Validate prerequisites up front, reporting each missing piece separately.
    if not twilio_client:
        st.error("Twilio client not initialized for sending message.")
        return False
    if not conversation_service_sid_val:
        st.error("Twilio Conversation Service SID not provided for sending message.")
        return False
    if not bot_identity_val:
        st.error("Bot identity not provided for sending message.")
        return False
    try:
        service = twilio_client.conversations.v1.services(conversation_service_sid_val)
        service.conversations(conversation_sid).messages.create(
            author=bot_identity_val,
            body=message_body,
        )
        st.success(f"Sent reply to conversation {conversation_sid}")
        return True
    except Exception as e:
        st.error(f"Error sending Twilio message to {conversation_sid}: {e}")
        return False
293
+
294
# --- Main Application Logic & UI ---
st.title("🤖 RAG-Based Customer Support Chatbot")
st.markdown("Powered by Streamlit, Twilio, GROQ LLaMA3, and FAISS.")

# --- Sidebar for Configurations ---
st.sidebar.title("⚙️ Configurations")

# Credentials come from st.secrets when available (shown masked and disabled);
# otherwise the user is warned and may enter them manually.
if APP_TWILIO_ACCOUNT_SID:
    # Parenthesized so the masking only applies when the SID is long enough.
    st.sidebar.text_input("Twilio Account SID (from Secrets)", value=("********" + APP_TWILIO_ACCOUNT_SID[-4:]) if len(APP_TWILIO_ACCOUNT_SID) > 4 else "********", disabled=True)
    twilio_account_sid_to_use = APP_TWILIO_ACCOUNT_SID
else:
    st.sidebar.warning("Secret 'TWILIO_SID' not found.")
    twilio_account_sid_to_use = st.sidebar.text_input("Twilio Account SID (Enter Manually)", value=DEFAULT_TWILIO_ACCOUNT_SID_FALLBACK, type="password")

if APP_TWILIO_AUTH_TOKEN:
    st.sidebar.text_input("Twilio Auth Token (from Secrets)", value="********", disabled=True)
    twilio_auth_token_to_use = APP_TWILIO_AUTH_TOKEN
else:
    st.sidebar.warning("Secret 'TWILIO_TOKEN' not found.")
    twilio_auth_token_to_use = st.sidebar.text_input("Twilio Auth Token (Enter Manually)", value=DEFAULT_TWILIO_AUTH_TOKEN_FALLBACK, type="password")

if APP_GROQ_API_KEY:
    st.sidebar.text_input("GROQ API Key (from Secrets)", value=("gsk_********" + APP_GROQ_API_KEY[-4:]) if len(APP_GROQ_API_KEY) > 8 else "********", disabled=True)
    groq_api_key_to_use = APP_GROQ_API_KEY
else:
    st.sidebar.warning("Secret 'GROQ_API_KEY' not found.")
    groq_api_key_to_use = st.sidebar.text_input("GROQ API Key (Enter Manually)", value=DEFAULT_GROQ_API_KEY_FALLBACK, type="password")

# Remaining settings stay editable; secrets only seed the initial values.
twilio_conversation_service_sid_to_use = st.sidebar.text_input(
    "Twilio Conversation Service SID (IS...)",
    value=APP_TWILIO_CONVERSATION_SERVICE_SID_SECRET or DEFAULT_TWILIO_CONVERSATION_SERVICE_SID,
    type="password",
    help="The SID of your Twilio Conversations Service. Can be set by 'TWILIO_CONVERSATION_SERVICE_SID' secret."
)
twilio_bot_whatsapp_identity_to_use = st.sidebar.text_input(
    "Twilio Bot WhatsApp Identity",
    value=APP_TWILIO_BOT_WHATSAPP_IDENTITY_SECRET or DEFAULT_TWILIO_BOT_WHATSAPP_IDENTITY,
    help="e.g., 'whatsapp:+1234567890'. Can be set by 'TWILIO_BOT_WHATSAPP_IDENTITY' secret."
)
embedding_model_name_to_use = st.sidebar.text_input(
    "Embedding Model Name",
    value=DEFAULT_EMBEDDING_MODEL_NAME
)
polling_interval_to_use = st.sidebar.number_input(
    "Twilio Polling Interval (seconds)",
    min_value=10, max_value=300,
    value=DEFAULT_POLLING_INTERVAL_S,
    step=5
)
345
 
346
# --- Initialize Session State ---
# Seed every session key on first run so downstream code can read them freely.
if "app_started" not in st.session_state:
    st.session_state.app_started = False
if "bot_started" not in st.session_state:
    st.session_state.bot_started = False
if "rag_pipeline_ready" not in st.session_state:
    st.session_state.rag_pipeline_ready = False
if "last_twilio_poll_time" not in st.session_state:
    st.session_state.last_twilio_poll_time = time.time()
if "bot_start_time_utc" not in st.session_state:
    st.session_state.bot_start_time_utc = None
if "processed_message_sids" not in st.session_state:
    st.session_state.processed_message_sids = set()
if "manual_chat_history" not in st.session_state:
    st.session_state.manual_chat_history = []
354
+
355
# --- Helper: Simple Intent Classifier ---
def simple_intent_classifier(query):
    """Bucket a user query into a coarse intent via keyword matching.

    Intents are checked in priority order; the first bucket with any
    keyword present wins. Falls back to "UNKNOWN".
    """
    lowered = query.lower()
    intent_keywords = (
        ("ORDER_STATUS", ("order", "status", "track", "delivery")),
        ("PRODUCT_INFO", ("product", "item", "buy", "price", "feature", "stock")),
        ("GENERAL_POLICY_FAQ", ("return", "policy", "refund", "exchange", "faq", "question", "how to", "support")),
    )
    for intent, keywords in intent_keywords:
        if any(keyword in lowered for keyword in keywords):
            return intent
    return "UNKNOWN"
362
+
363
# --- Main Application Controls ---
# Four buttons drive the app lifecycle: start/stop the RAG pipeline and
# start/stop the WhatsApp polling bot. (Emoji labels repaired from a
# UTF-8-as-Windows-874 mojibake in the previous revision.)
col1, col2, col3, col4 = st.columns(4)
with col1:
    if st.button("🚀 Start App", disabled=st.session_state.app_started, use_container_width=True):
        if not groq_api_key_to_use:
            st.error("GROQ API Key is required.")
        else:
            with st.spinner("Initializing RAG pipeline..."):
                # Load the embedding model, structured data, and PDF corpus.
                st.session_state.embedding_model = initialize_embedding_model(embedding_model_name_to_use)
                st.session_state.customer_orders_data = load_json_data(CUSTOMER_ORDERS_FILE)
                st.session_state.products_data = load_json_data(PRODUCTS_FILE)
                policy_pdf_pages = load_pdf_data(POLICY_PDF_FILE)
                faq_pdf_pages = load_pdf_data(FAQ_PDF_FILE)
                all_pdf_text_pages = policy_pdf_pages + faq_pdf_pages
                st.session_state.pdf_text_chunks_raw = chunk_text(all_pdf_text_pages)

                if st.session_state.embedding_model and st.session_state.pdf_text_chunks_raw:
                    st.session_state.faiss_index_pdfs, st.session_state.indexed_pdf_chunks = \
                        create_faiss_index(st.session_state.pdf_text_chunks_raw, st.session_state.embedding_model)
                else:
                    st.session_state.faiss_index_pdfs, st.session_state.indexed_pdf_chunks = None, []
                    st.warning("FAISS index for PDFs could not be created.")

                st.session_state.groq_client = initialize_groq_client(groq_api_key_to_use)

                # The pipeline is ready only if every component initialized.
                if st.session_state.embedding_model and st.session_state.groq_client and \
                   st.session_state.customer_orders_data and st.session_state.products_data:
                    st.session_state.rag_pipeline_ready = True
                    st.session_state.app_started = True
                    st.success("RAG Application Started!")
                    st.rerun()
                else:
                    st.error("Failed to initialize RAG pipeline. Check configurations and ensure all data files are present in 'docs/'.")
                    st.session_state.app_started = False
with col2:
    if st.button("🛑 Stop App", disabled=not st.session_state.app_started, use_container_width=True):
        # Drop every cached component, then re-seed the flags the UI reads.
        keys_to_reset = ["app_started", "bot_started", "rag_pipeline_ready", "embedding_model",
                         "customer_orders_data", "products_data", "pdf_text_chunks_raw",
                         "faiss_index_pdfs", "indexed_pdf_chunks", "groq_client", "twilio_client",
                         "bot_start_time_utc", "processed_message_sids", "manual_chat_history"]
        for key in keys_to_reset:
            if key in st.session_state:
                del st.session_state[key]
        st.session_state.app_started = False
        st.session_state.bot_started = False
        st.session_state.rag_pipeline_ready = False
        st.session_state.processed_message_sids = set()
        st.session_state.manual_chat_history = []
        st.success("Application Stopped.")
        st.rerun()
with col3:
    if st.button("💬 Start WhatsApp Bot", disabled=not st.session_state.app_started or st.session_state.bot_started, use_container_width=True):
        if not all([twilio_account_sid_to_use, twilio_auth_token_to_use, twilio_conversation_service_sid_to_use, twilio_bot_whatsapp_identity_to_use]):
            st.error("Twilio credentials, Service SID, and Bot Identity are required.")
        else:
            st.session_state.twilio_client = initialize_twilio_client(twilio_account_sid_to_use, twilio_auth_token_to_use)
            if st.session_state.twilio_client:
                st.session_state.bot_started = True
                st.session_state.bot_start_time_utc = datetime.now(timezone.utc)
                st.session_state.processed_message_sids = set()
                # Back-date the poll clock so the first poll fires immediately.
                st.session_state.last_twilio_poll_time = time.time() - polling_interval_to_use - 1
                st.success("WhatsApp Bot Started!")
                st.rerun()
            else:
                st.error("Failed to initialize Twilio client.")
427
+ with col4:
428
+ if st.button("๐Ÿ”• Stop WhatsApp Bot", disabled=not st.session_state.bot_started, use_container_width=True):
429
+ st.session_state.bot_started = False
430
+ st.info("WhatsApp Bot Stopped.")
431
+ st.rerun()
432
+ st.divider()
433
# --- Manual Query Interface ---
if st.session_state.get("app_started") and st.session_state.get("rag_pipeline_ready"):
    st.subheader("💬 Manual Query")

    def _render_retrieved_context(ctx):
        """Render retrieved context as JSON when it parses/serializes, else as plain text.

        Shared by the history-replay loop and the live response path so both
        render identically and neither can crash on a plain-string context.
        """
        try:
            if isinstance(ctx, str) and (ctx.strip().startswith('{') or ctx.strip().startswith('[')):
                st.json(json.loads(ctx))
            else:
                st.json(ctx)
        except (json.JSONDecodeError, TypeError):
            st.text(str(ctx))

    # Replay prior turns. FIX: previously this called st.json() directly, without
    # the try/except fallback the live path used, so replaying a string context
    # could fail. Both paths now go through _render_retrieved_context.
    for chat_entry in st.session_state.manual_chat_history:
        with st.chat_message(chat_entry["role"]):
            st.markdown(chat_entry["content"])
            if "context" in chat_entry and chat_entry["context"]:
                with st.expander("Retrieved Context"):
                    _render_retrieved_context(chat_entry["context"])

    user_query_manual = st.chat_input("Ask a question:")
    if user_query_manual:
        st.session_state.manual_chat_history.append({"role": "user", "content": user_query_manual})
        with st.chat_message("user"):
            st.markdown(user_query_manual)

        with st.spinner("Thinking..."):
            # Route the query to the right context source based on a keyword classifier.
            intent = simple_intent_classifier(user_query_manual)
            context_for_llm, raw_context_data = "No specific context.", None

            if intent == "ORDER_STATUS":
                # Look for a token resembling an order id (e.g. "ORD123").
                words = user_query_manual.split()
                potential_oid = next((w for w in words if w.upper().startswith("ORD")), None)
                if potential_oid:
                    raw_context_data = get_order_details(potential_oid.upper(), st.session_state.customer_orders_data)
                    context_for_llm = f"Order Details: {raw_context_data}"
                else:
                    context_for_llm = "Please provide an Order ID."
                    raw_context_data = {"message": "Order ID needed."}
            elif intent == "PRODUCT_INFO":
                raw_context_data = get_product_info(user_query_manual, st.session_state.products_data)
                context_for_llm = f"Product Information: {raw_context_data}"
            elif intent == "GENERAL_POLICY_FAQ" or intent == "UNKNOWN":
                if st.session_state.faiss_index_pdfs and st.session_state.embedding_model:
                    # Pull more chunks for explicit policy/FAQ questions than for unknown intents.
                    k_val = 2 if intent == "GENERAL_POLICY_FAQ" else 1
                    retrieved_chunks = search_faiss_index(st.session_state.faiss_index_pdfs, user_query_manual,
                                                          st.session_state.embedding_model,
                                                          st.session_state.indexed_pdf_chunks, k=k_val)
                    if retrieved_chunks:
                        context_for_llm = "\n\n".join(retrieved_chunks)
                        raw_context_data = retrieved_chunks
                    else:
                        context_for_llm = "No specific policy/FAQ info found." if intent == "GENERAL_POLICY_FAQ" else "Could not find relevant info."
                        raw_context_data = {"message": "No relevant PDF chunks found."}
                else:
                    context_for_llm = "Policy/FAQ documents unavailable."
                    raw_context_data = {"message": "PDF index not ready."}

            llm_response = generate_response_groq(st.session_state.groq_client, user_query_manual, context_for_llm)
            with st.chat_message("assistant"):
                st.markdown(llm_response)
                if raw_context_data:
                    with st.expander("Retrieved Context"):
                        _render_retrieved_context(raw_context_data)
            st.session_state.manual_chat_history.append({"role": "assistant", "content": llm_response, "context": raw_context_data})
# --- Twilio Bot Polling Logic ---
if st.session_state.get("bot_started") and st.session_state.get("rag_pipeline_ready"):
    current_time = time.time()
    # Throttle: only hit the Twilio API once per polling interval.
    if (current_time - st.session_state.get("last_twilio_poll_time", 0)) > polling_interval_to_use:
        st.session_state.last_twilio_poll_time = current_time
        with st.spinner("Checking WhatsApp messages..."):
            if not st.session_state.get("twilio_client") or not twilio_conversation_service_sid_to_use or not twilio_bot_whatsapp_identity_to_use:
                st.warning("Twilio client/config missing for polling.")
            else:
                new_messages = get_new_whatsapp_messages(st.session_state.twilio_client, twilio_conversation_service_sid_to_use,
                                                         st.session_state.bot_start_time_utc, st.session_state.processed_message_sids,
                                                         twilio_bot_whatsapp_identity_to_use)
                if new_messages:
                    st.info(f"Found {len(new_messages)} new WhatsApp message(s).")
                    for msg_data in new_messages:
                        user_query_whatsapp = msg_data["message_body"]
                        conv_sid = msg_data["conversation_sid"]
                        msg_sid = msg_data["message_sid"]
                        author_id = msg_data["author_identity"]
                        st.write(f"Processing from {author_id} in {conv_sid}: '{user_query_whatsapp}'")

                        # Same intent routing as the manual-query path, but with the
                        # shorter context strings used for WhatsApp replies.
                        intent = simple_intent_classifier(user_query_whatsapp)
                        context_whatsapp = "No specific context."
                        if intent == "ORDER_STATUS":
                            words = user_query_whatsapp.split()
                            oid = next((w for w in words if w.upper().startswith("ORD")), None)
                            context_whatsapp = f"Order Details: {get_order_details(oid.upper(), st.session_state.customer_orders_data)}" if oid else "Please provide Order ID."
                        elif intent == "PRODUCT_INFO":
                            context_whatsapp = f"Product Info: {get_product_info(user_query_whatsapp, st.session_state.products_data)}"
                        elif intent == "GENERAL_POLICY_FAQ" or intent == "UNKNOWN":
                            if st.session_state.faiss_index_pdfs and st.session_state.embedding_model:
                                k_val = 2 if intent == "GENERAL_POLICY_FAQ" else 1
                                chunks = search_faiss_index(st.session_state.faiss_index_pdfs, user_query_whatsapp,
                                                            st.session_state.embedding_model,
                                                            st.session_state.indexed_pdf_chunks, k=k_val)
                                context_whatsapp = "\n\n".join(chunks) if chunks else ("No policy/FAQ info." if intent == "GENERAL_POLICY_FAQ" else "No relevant info.")
                            else:
                                context_whatsapp = "Policy/FAQ docs unavailable."

                        response_whatsapp = generate_response_groq(st.session_state.groq_client, user_query_whatsapp, context_whatsapp)
                        if send_whatsapp_message(st.session_state.twilio_client, twilio_conversation_service_sid_to_use,
                                                 conv_sid, response_whatsapp, twilio_bot_whatsapp_identity_to_use):
                            # Only mark as processed after a successful send so failed
                            # sends are retried on the next poll.
                            st.session_state.processed_message_sids.add(msg_sid)
                            st.success(f"Responded to {msg_sid} from {author_id}")
                        else:
                            st.error(f"Failed to send response for {msg_sid}")
                    # FIX: was st.experimental_rerun(), which is deprecated and removed
                    # in modern Streamlit; the rest of this app already uses st.rerun().
                    # NOTE(review): no rerun is scheduled between polls, so polling only
                    # resumes on the next user interaction — confirm intended design.
                    st.rerun()
# --- Footer & Status ---
# Sidebar footer: configuration reminder plus a live app/bot status line.
st.sidebar.markdown("---")
st.sidebar.info(
    "Ensure all keys and SIDs are correctly configured. Primary API keys "
    "(Twilio SID/Token, GROQ Key) are loaded from secrets if available."
)
if st.session_state.get("app_started"):
    bot_state = "RUNNING" if st.session_state.get("bot_started") else "STOPPED"
    st.sidebar.success(f"App RUNNING. WhatsApp Bot {bot_state}.")
else:
    st.sidebar.warning("App is STOPPED.")