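"""Internal Knowledge Base AI.

A Streamlit RAG app: documents in the `docs/` folder are chunked, embedded
with HuggingFace sentence-transformers, and indexed in FAISS; a Groq-hosted
LLM answers questions over the retrieved context. When Twilio credentials
are configured, background threads also poll Twilio Conversations and answer
incoming WhatsApp messages with the same RAG chains.
"""
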
import streamlit as st
import os
import glob
from dotenv import load_dotenv
import time
import threading
from twilio.rest import Client  # Import Twilio client
from langchain_community.document_loaders import (
    PyPDFLoader,
    Docx2txtLoader,
    UnstructuredExcelLoader,
    JSONLoader,
    UnstructuredFileLoader  # Generic loader, good for tables
)
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings  # langchain.embeddings path is deprecated
from langchain_community.vectorstores import FAISS
from langchain_groq import ChatGroq
from langchain.prompts import PromptTemplate
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.output_parser import StrOutputParser

# --- Configuration ---
load_dotenv()
groq_api_key = os.getenv("GROQ_API_KEY")
DOCS_DIR = "docs"
EMBEDDING_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
CACHE_DIR = ".streamlit_cache"
GENERAL_QA_PROMPT = """
You are an AI assistant for our internal knowledge base.
Your goal is to provide accurate and concise answers based ONLY on the provided context.
Do not make up information. If the answer is not found in the context, state that clearly.
Ensure your answers are directly supported by the text.
Accuracy is paramount.

Context:
{context}

Question: {question}

Answer:
"""
ORDER_STATUS_PROMPT = """
You are an AI assistant helping with customer order inquiries.
Based ONLY on the following retrieved information from our order system and policies:
{context}

The customer's query is: {question}

Please perform the following steps:
1. Carefully analyze the context for any order details (Order ID, Customer Name, Status, Items, Dates, etc.).
2. If an order matching the query (or related to a name in the query) is found in the context:
    - Address the customer by their name if available in the order details (e.g., "Hello [Customer Name],").
    - Provide ALL available information about their order, including Order ID, status, items, dates, and any other relevant details found in the context.
    - Be comprehensive and clear.
3. If no specific order details are found in the context that match the query, politely state that you couldn't find the specific order information in the provided documents and suggest they contact support for further assistance.
4. Do NOT invent or infer any information not explicitly present in the context.

Answer:
"""
MONITOR_INTERVAL_SECONDS = 30  # Seconds between scans for new conversations
APP_START_TIME = time.time()
BOT_AUTHOR = "Internal Knowledge Base AI"  # Author name attached to outbound bot messages

# Twilio Configuration (read from the environment / .env file)
account_sid = os.getenv("TWILIO_ACCOUNT_SID")
auth_token = os.getenv("TWILIO_AUTH_TOKEN")
twilio_number = os.getenv("TWILIO_PHONE_NUMBER")  # Your Twilio phone number
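# A minimal .env sketch (assumed layout; all values below are placeholders):
#   GROQ_API_KEY=gsk_xxxxxxxx
#   TWILIO_ACCOUNT_SID=ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#   TWILIO_AUTH_TOKEN=xxxxxxxxxxxxxxxx
#   TWILIO_PHONE_NUMBER=+15551234567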


# Create docs and cache directory if they don't exist
if not os.path.exists(DOCS_DIR):
    os.makedirs(DOCS_DIR)
if not os.path.exists(CACHE_DIR):
    os.makedirs(CACHE_DIR)


# --- Helper Function for Document Loading ---
def get_loader(file_path):
    """Detects file type and returns appropriate Langchain loader."""
    _, ext = os.path.splitext(file_path)
    ext = ext.lower()
    # Prioritize UnstructuredFileLoader for robust table and content extraction
    # UnstructuredFileLoader can handle many types, but we can specify if needed
    if ext in ['.pdf', '.docx', '.doc', '.xlsx', '.xls', '.json', '.txt', '.md', '.html', '.xml', '.eml', '.msg']:
        return UnstructuredFileLoader(file_path, mode="elements", strategy="fast")  # "elements" is good for tables
    # Fallback or specific loaders if UnstructuredFileLoader has issues with a particular file
    # elif ext == ".pdf":
    #    return PyPDFLoader(file_path) # Basic PDF loader
    # elif ext in [".docx", ".doc"]:
    #    return Docx2txtLoader(file_path) # Basic DOCX loader
    # elif ext in [".xlsx", ".xls"]:
    #    return UnstructuredExcelLoader(file_path, mode="elements") # Unstructured for Excel
    # elif ext == ".json":
    #    return JSONLoader(file_path, jq_schema='.[]', text_content=False) # Adjust jq_schema as needed
    else:
        st.warning(f"Unsupported file type: {ext}. Skipping {os.path.basename(file_path)}")
        return None


# --- Caching Functions ---
@st.cache_resource(show_spinner=False)  # Disable spinner during initial load
def load_and_process_documents(docs_path: str):
    """
    Loads documents from the specified path, processes them, and splits into chunks.
    Uses UnstructuredFileLoader for potentially better table extraction.
    """
    documents = []
    doc_files = []
    for ext in ["*.pdf", "*.docx", "*.xlsx", "*.json", "*.txt", "*.md"]:
        doc_files.extend(glob.glob(os.path.join(docs_path, ext)))

    if not doc_files:
        st.error(f"No documents found in the '{docs_path}' directory. Please add some documents.")
        st.info("Supported formats: .pdf, .docx, .xlsx, .json, .txt, .md")
        return []

    for file_path in doc_files:
        try:
            print(f"Processing: {os.path.basename(file_path)}...")  # Show progress
            loader = get_loader(file_path)
            if loader:
                loaded_docs = loader.load()
                # Add source metadata to each document for better traceability
                for doc in loaded_docs:
                    doc.metadata["source"] = os.path.basename(file_path)
                documents.extend(loaded_docs)
        except Exception as e:
            st.error(f"Error loading {os.path.basename(file_path)}: {e}")
            st.warning(f"Skipping file {os.path.basename(file_path)} due to error.")

    if not documents:
        st.error("No documents were successfully loaded or processed.")
        return []

    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    chunked_documents = text_splitter.split_documents(documents)

    if not chunked_documents:
        st.error("Document processing resulted in no text chunks. Check document content and parsing.")
        return []

    st.success(f"Successfully loaded and processed {len(doc_files)} documents into {len(chunked_documents)} chunks.")
    return chunked_documents


@st.cache_resource(show_spinner=False)  # Disable spinner during initial load
def create_vector_store(_documents, _embedding_model_name: str):
    """Creates a FAISS vector store from the given documents and embedding model."""
    if not _documents:
        st.warning("Cannot create vector store: No documents processed.")
        return None
    try:
        embeddings = HuggingFaceEmbeddings(model_name=_embedding_model_name)
        vector_store = FAISS.from_documents(_documents, embedding=embeddings)
        st.success("Vector Store created successfully!")
        return vector_store
    except Exception as e:
        st.error(f"Error creating vector store: {e}")
        # FAISS.from_documents([]) raises on an empty list, so return None
        # and let the caller stop the app cleanly.
        return None


@st.cache_resource(show_spinner=False)  # Disable spinner during initial load
def get_llm(api_key: str, model_name: str = "llama3-8b-8192"):
    """Initializes the Groq LLM."""
    if not api_key:
        st.error("GROQ_API_KEY not found! Please set it in your environment variables or a .env file.")
        return None
    try:
        # Available models (check Groq documentation for the latest):
        # "llama3-8b-8192" (good balance of speed and capability)
        # "llama3-70b-8192" (more powerful, potentially slower)
        # "gemma-7b-it"
        llm = ChatGroq(temperature=0, groq_api_key=api_key, model_name=model_name)
        st.sidebar.info(f"LLM Initialized: {model_name}")  # Add info about which model is used
        return llm
    except Exception as e:
        st.error(f"Error initializing Groq LLM: {e}")
        return None


# --- RAG Chain Setup ---
def get_rag_chain(llm, retriever, prompt_template):
    """Creates the Retrieval QA chain."""
    prompt = PromptTemplate.from_template(prompt_template)
    rag_chain = (
        {"context": retriever, "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )
    return rag_chain
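

def is_order_status_query(text: str) -> bool:
    """Heuristic router shared by the WhatsApp poller and the Streamlit UI.

    A query is treated as an order-status inquiry when it mentions an order
    together with a status/tracking keyword or a customer-name keyword; such
    queries are answered with ORDER_STATUS_PROMPT instead of GENERAL_QA_PROMPT.
    """
    lowered = text.lower()
    return "order" in lowered and (
        any(kw in lowered for kw in ("status", "track", "update"))
        or any(kw in lowered for kw in ("customer", "client", "name"))
    )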


# --- Twilio Helper Functions ---
def send_twilio_message(client, conversation_sid, message):
    """Sends a message to a Twilio Conversation."""
    try:
        client.conversations.v1.conversations(conversation_sid).messages.create(
            author=BOT_AUTHOR,
            body=message,
        )
    except Exception as e:
        print(f"❌ Error sending Twilio message: {e}")


def fetch_latest_incoming_message(client, conversation_sid):
    """Fetches the latest incoming message from a conversation."""
    try:
        # The Conversations API lists messages oldest-first by default, so
        # request descending order to fetch the newest message.
        messages = client.conversations.v1.conversations(conversation_sid).messages.list(order="desc", limit=1)
        for msg in messages:
            # Conversations messages carry no `direction` field; treat any
            # message not authored by this bot as incoming.
            if msg.author != BOT_AUTHOR:
                return {
                    "body": msg.body,
                    "author": msg.author,
                    "timestamp": msg.date_created
                }
        return None  # No incoming message found
    except Exception as e:
        print(f"❌ Error fetching latest message: {e}")
        return None


def poll_conversation(convo_sid, general_chain, order_chain, client):
    """Polls a single conversation for new messages and responds."""
    last_processed_timestamp = None
    while True:
        try:
            latest_msg = fetch_latest_incoming_message(client, convo_sid)
            if latest_msg:
                msg_time = latest_msg["timestamp"]
                if last_processed_timestamp is None or msg_time > last_processed_timestamp:
                    last_processed_timestamp = msg_time
                    question = latest_msg["body"]
                    sender = latest_msg["author"]
                    print(f"\n📥 New message from {sender} in {convo_sid}: {question}")
                    try:
                        # The prompt is baked into each LCEL chain at construction
                        # time (passing one via `config` has no effect), so route
                        # the message to the chain built with the matching prompt.
                        if is_order_status_query(question):
                            answer = order_chain.invoke(question)
                        else:
                            answer = general_chain.invoke(question)
                        send_twilio_message(client, convo_sid, answer)
                        print(f"📤 Replied to {sender}: {answer}")
                    except Exception as e:
                        print(f"❌ Error during RAG chain invocation: {e}")
                        answer = "Sorry, I encountered an error while processing your request."
                        send_twilio_message(client, convo_sid, answer)
            time.sleep(3)  # Polling interval between message checks
        except Exception as e:
            print(f"❌ Error in convo {convo_sid} polling:", e)
            time.sleep(5)


def poll_new_conversations(client, vector_store, llm):
    """Polls for new conversations and starts monitoring them."""
    processed_convos = set()
    print("➡️ Monitoring for new WhatsApp conversations...")
    retriever = vector_store.as_retriever(search_kwargs={"k": 5})
    # Build one chain per prompt up front; poll_conversation picks between them.
    general_chain = get_rag_chain(llm, retriever, GENERAL_QA_PROMPT)
    order_chain = get_rag_chain(llm, retriever, ORDER_STATUS_PROMPT)
    while True:
        try:
            conversations = client.conversations.v1.conversations.list(limit=20)  # Adjust limit as needed
            for convo in conversations:
                convo_full = client.conversations.v1.conversations(convo.sid).fetch()
                # date_created is a timezone-aware datetime; compare epoch seconds
                # so the comparison against time.time() is valid.
                if convo.sid not in processed_convos and convo_full.date_created.timestamp() > APP_START_TIME:
                    participants = client.conversations.v1.conversations(convo.sid).participants.list()
                    for p in participants:
                        address = p.messaging_binding.get("address", "") if p.messaging_binding else ""
                        if address.startswith("whatsapp:"):
                            print(f"🆕 New WhatsApp convo found: {convo.sid}")
                            processed_convos.add(convo.sid)
                            threading.Thread(target=poll_conversation,
                                             args=(convo.sid, general_chain, order_chain, client),
                                             daemon=True).start()
        except Exception as e:
            print("❌ Error polling conversations:", e)
        time.sleep(MONITOR_INTERVAL_SECONDS)


def start_conversation_monitor(client, vector_store, llm):
    """Starts the conversation monitoring process."""
    # Launch the new conversation polling in a separate thread
    threading.Thread(target=poll_new_conversations, args=(client, vector_store, llm),
                     daemon=True).start()
    print("🟒 WhatsApp monitoring started.")  # Add a message


# --- Main Application Logic ---
def main():
    # --- UI Setup ---
    st.set_page_config(page_title="Internal Knowledge Base AI", layout="wide", initial_sidebar_state="expanded")

    # Custom CSS (remains the same)
    st.markdown("""
    <style>
        .reportview-container .main .block-container{
            padding-top: 2rem;
            padding-bottom: 2rem;
        }
        .st-emotion-cache-z5fcl4 {
            padding-top: 1rem;
        }
        .response-area {
            background-color: #f0f2f6;
            padding: 15px;
            border-radius: 5px;
            margin-top: 10px;
        }
    </style>
    """, unsafe_allow_html=True)

    st.title("πŸ“š Internal Knowledge Base AI πŸ’‘")

    st.sidebar.header("System Status")
    status_placeholder = st.sidebar.empty()
    status_placeholder.info("Initializing...")

    if not groq_api_key:
        status_placeholder.error("GROQ API Key not configured. Application cannot start.")
        st.stop()

    # --- Initialize session state ---
    if "app_initialized" not in st.session_state:
        st.session_state.app_initialized = False

    # --- Start Button ---
    if not st.session_state.app_initialized:
        if st.button("Start"):  # Create a start button
            st.session_state.app_initialized = True  # set the session state to true
            st.rerun()  # Rerun the app to trigger the knowledge base loading

    # --- Knowledge Base Loading and LLM Initialization ---
    if st.session_state.app_initialized:  # only run if the app has been initialized
        with st.spinner("Knowledge Base is loading... Please wait."):
            start_time = time.time()
            processed_documents = load_and_process_documents(DOCS_DIR)
            if not processed_documents:
                status_placeholder.error("Failed to load or process documents. Check logs and `docs` folder.")
                st.stop()

            vector_store = create_vector_store(processed_documents, EMBEDDING_MODEL_NAME)
            if not vector_store:
                status_placeholder.error("Failed to create vector store. Application cannot proceed.")
                st.stop()

            # The model name is fixed here; see get_llm for other Groq options.
            llm = get_llm(groq_api_key, model_name="llama3-8b-8192")
            if not llm:
                # Error is already shown by get_llm, but update status_placeholder too
                status_placeholder.error("Failed to initialize LLM. Application cannot proceed.")
                st.stop()

            end_time = time.time()
            # status_placeholder is updated by get_llm or on success below
            status_placeholder.success(f"Application Ready! (Loaded in {end_time - start_time:.2f}s)")

        # --- Initialize Twilio Client and Start Monitoring ---
        if account_sid and auth_token and twilio_number:
            # Guard against Streamlit reruns spawning duplicate monitor threads.
            if "twilio_monitor_started" not in st.session_state:
                try:
                    client = Client(account_sid, auth_token)
                    start_conversation_monitor(client, vector_store, llm)
                    st.session_state.twilio_monitor_started = True
                except Exception as e:
                    status_placeholder.error(f"Failed to initialize Twilio: {e}. Check your credentials and network.")
                    st.stop()
            st.success("🟢 Monitoring new WhatsApp conversations...")
            st.info("⏳ Waiting for new messages...")
        else:
            st.warning("Twilio credentials not fully configured. WhatsApp monitoring is disabled.")

        # --- Query Input and Response ---
        st.markdown("---")
        st.subheader("Ask a question about our documents:")

        if "messages" not in st.session_state:
            st.session_state.messages = []

        query = st.text_input("Enter your question:", key="query_input",
                            placeholder="e.g., 'What is the return policy?' or 'Status of order for John Doe?'")

        if st.button("Submit", key="submit_button"):
            if query:
                st.session_state.messages.append({"role": "user", "content": query})

                current_model_info = st.sidebar.empty()  # Placeholder for current mode info

                if "order" in query.lower() and (
                        "status" in query.lower() or "track" in query.lower() or "update" in query.lower() or any(
                        name_part.lower() in query.lower() for name_part in ["customer", "client", "name"])):
                    active_prompt_template = ORDER_STATUS_PROMPT
                    current_model_info.info("Mode: Order Status Query")
                else:
                    active_prompt_template = GENERAL_QA_PROMPT
                    current_model_info.info("Mode: General Query")

                rag_chain = get_rag_chain(llm, vector_store.as_retriever(search_kwargs={"k": 5}), active_prompt_template)

                with st.spinner("Thinking..."):
                    try:
                        response = rag_chain.invoke(query)
                        st.session_state.messages.append({"role": "assistant", "content": response})
                    except Exception as e:
                        st.error(f"Error during RAG chain invocation: {e}")
                        response = "Sorry, I encountered an error while processing your request."
                        st.session_state.messages.append({"role": "assistant", "content": response})
            else:
                st.warning("Please enter a question.")

        st.markdown("---")
        st.subheader("Response:")
        response_area = st.container()
        # Ensure response_area is robust against empty messages or incorrect last role
        last_assistant_message = "Ask a question to see the answer here."
        if st.session_state.messages and st.session_state.messages[-1]['role'] == 'assistant':
            last_assistant_message = st.session_state.messages[-1]['content']

        response_area.markdown(f"<div class='response-area'>{last_assistant_message}</div>",
                               unsafe_allow_html=True)

        st.sidebar.markdown("---")
        st.sidebar.markdown("Built with ❀️ using Streamlit & Langchain & Groq")



if __name__ == "__main__":
    main()