# app.py

import os
import time
import threading
import streamlit as st
from twilio.rest import Client
from pdfminer.high_level import extract_text
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer
import faiss
import numpy as np
import docx
from groq import Groq
import PyPDF2
import requests
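
# Third-party dependencies inferred from the imports above (package names are
# the usual PyPI distributions for these modules; pin versions as needed):
#   pip install streamlit twilio pdfminer.six sentence-transformers transformers \
#       faiss-cpu numpy python-docx groq PyPDF2 requests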

# --- Text Extraction Utilities ---
def extract_text_from_pdf(pdf_path):
    """Extract text from a PDF with PyPDF2, falling back to pdfminer if that fails."""
    try:
        text = ""
        with open(pdf_path, 'rb') as file:
            reader = PyPDF2.PdfReader(file)
            for page in reader.pages:
                page_text = page.extract_text()
                if page_text:
                    text += page_text
        return text
    except Exception:
        # PyPDF2 can choke on some PDFs; fall back to pdfminer's extractor.
        return extract_text(pdf_path)

def extract_text_from_docx(docx_path):
    """Extract text from a .docx file; returns an empty string if parsing fails."""
    try:
        doc = docx.Document(docx_path)
        return '\n'.join(para.text for para in doc.paragraphs)
    except Exception:
        return ""

# --- Chunking & Retrieval ---
def chunk_text(text, tokenizer, chunk_size=150, chunk_overlap=30):
    tokens = tokenizer.tokenize(text)
    chunks, start = [], 0
    while start < len(tokens):
        end = min(start + chunk_size, len(tokens))
        chunk_tokens = tokens[start:end]
        chunks.append(tokenizer.convert_tokens_to_string(chunk_tokens))
        start += chunk_size - chunk_overlap
    return chunks
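
# Worked example: with the defaults (chunk_size=150, chunk_overlap=30) the
# window advances 120 tokens per step, so chunk 1 covers tokens 0-149,
# chunk 2 covers tokens 120-269, and consecutive chunks share 30 tokens.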

def retrieve_chunks(question, index, embed_model, text_chunks, k=3):
    q_embedding = embed_model.encode([question])[0]
    D, I = index.search(np.array([q_embedding]), k)
    return [text_chunks[i] for i in I[0]]
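
# index.search returns the k nearest chunk embeddings by L2 distance (D holds
# the distances, I the chunk indices); the matching text chunks become the
# context passed to the LLM below.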

# --- Groq Answer Generator ---
def generate_answer_with_groq(question, context):
    url = "https://api.groq.com/openai/v1/chat/completions"
    api_key = os.environ.get("GROQ_API_KEY")
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
    prompt = (
        f"Customer asked: '{question}'\n\n"
        f"Here is the relevant product or policy info to help:\n{context}\n\n"
        f"Respond in a friendly and helpful tone as a toy shop support agent."
    )
    payload = {
        "model": "llama3-8b-8192",
        "messages": [
            {
                "role": "system",
                "content": (
                    "You are ToyBot, a friendly and helpful WhatsApp assistant for an online toy shop. "
                    "Your goal is to politely answer customer questions, help them choose the right toys, "
                    "provide order or delivery information, explain return policies, and guide them through purchases. "
                )
            },
            {"role": "user", "content": prompt},
        ],
        "temperature": 0.5,
        "max_tokens": 300,
    }
    response = requests.post(url, headers=headers, json=payload)
    response.raise_for_status()
    return response.json()['choices'][0]['message']['content'].strip()
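
# Note: although `groq.Groq` is imported above, this helper calls Groq's
# OpenAI-compatible REST endpoint directly via `requests`. A rough equivalent
# using the SDK client (a sketch, assuming GROQ_API_KEY is set in the
# environment) would be:
#   sdk = Groq()
#   resp = sdk.chat.completions.create(model="llama3-8b-8192", messages=payload["messages"])
#   answer = resp.choices[0].message.content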

# --- Twilio Functions ---
def get_latest_whatsapp_conversation_sid(client):
    """Return the SID of the most recent conversation that has a WhatsApp participant."""
    conversations = client.conversations.v1.conversations.list(limit=20)
    for convo in conversations:
        try:
            participants = client.conversations.v1.conversations(convo.sid).participants.list()
            for p in participants:
                if (p.identity and p.identity.startswith("whatsapp:")) or (
                    p.messaging_binding and p.messaging_binding.get("address", "").startswith("whatsapp:")
                ):
                    return convo.sid
        except Exception:
            # Skip conversations whose participants cannot be listed.
            continue
    return conversations[0].sid if conversations else None

def fetch_latest_incoming_message(client, conversation_sid):
    """Return (body, author, index) of the newest WhatsApp-authored message, or (None, None, None)."""
    messages = client.conversations.v1.conversations(conversation_sid).messages.list(limit=10)
    for msg in reversed(messages):
        if msg.author and msg.author.startswith("whatsapp:"):
            return msg.body, msg.author, msg.index
    return None, None, None

def send_twilio_message(client, conversation_sid, body):
    return client.conversations.v1.conversations(conversation_sid).messages.create(
        author="system", body=body
    )
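
# The reply is posted to the conversation as author "system"; Twilio then
# delivers it to the WhatsApp participant(s) bound to that conversation.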

# --- Load Knowledge Base ---
def setup_knowledge_base():
    """Build a FAISS index over token chunks of every document in the docs/ folder."""
    folder_path = "docs"
    all_text = ""
    for file in os.listdir(folder_path):
        path = os.path.join(folder_path, file)
        if file.endswith(".pdf"):
            all_text += extract_text_from_pdf(path) + "\n"
        elif file.endswith((".docx", ".doc")):
            # Note: python-docx only parses .docx; legacy .doc files fall back to "".
            all_text += extract_text_from_docx(path) + "\n"
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    chunks = chunk_text(all_text, tokenizer)
    model = SentenceTransformer('all-mpnet-base-v2')
    embeddings = model.encode(chunks)
    dim = embeddings[0].shape[0]
    index = faiss.IndexFlatL2(dim)
    index.add(np.array(embeddings).astype('float32'))
    return index, model, chunks
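
# all-mpnet-base-v2 produces 768-dimensional sentence embeddings, so the
# IndexFlatL2 built here performs exact (brute-force) L2 search in that space,
# which is plenty for a small docs/ folder.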

# --- Background Polling Thread ---
def start_message_monitor(client, convo_sid, index, embed_model, text_chunks):
    last_index = -1

    def poll_loop():
        nonlocal last_index
        while True:
            try:
                question, sender, msg_index = fetch_latest_incoming_message(client, convo_sid)
                if question and msg_index > last_index:
                    last_index = msg_index
                    print(f"\nπŸ“₯ New Message from {sender}: {question}")
                    context = "\n\n".join(retrieve_chunks(question, index, embed_model, text_chunks))
                    answer = generate_answer_with_groq(question, context)
                    send_twilio_message(client, convo_sid, answer)
                    print(f"πŸ“€ Sent Reply: {answer}")
                time.sleep(3)
            except Exception as e:
                print("❌ Error in polling loop:", e)
                time.sleep(5)

    thread = threading.Thread(target=poll_loop, daemon=True)
    thread.start()
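
# The poller runs as a daemon thread, so it stops when the Streamlit process
# exits. It only replies to messages whose index is newer than the last one
# handled, which avoids answering the same message twice.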

# --- Streamlit UI ---
st.set_page_config(page_title="Quasa – A Smart WhatsApp Chatbot", layout="wide")
st.title("πŸ“± Quasa – A Smart WhatsApp Chatbot")

account_sid = st.secrets.get("TWILIO_SID")
auth_token = st.secrets.get("TWILIO_TOKEN")
GROQ_API_KEY = st.secrets.get("GROQ_API_KEY")

if not all([account_sid, auth_token, GROQ_API_KEY]):
    st.warning("⚠️ Provide all credentials below:")
    account_sid = st.text_input("Twilio SID", value=account_sid or "")
    auth_token = st.text_input("Twilio Token", type="password", value=auth_token or "")
    GROQ_API_KEY = st.text_input("GROQ API Key", type="password", value=GROQ_API_KEY or "")

if all([account_sid, auth_token, GROQ_API_KEY]):
    os.environ["GROQ_API_KEY"] = GROQ_API_KEY
    client = Client(account_sid, auth_token)
    conversation_sid = get_latest_whatsapp_conversation_sid(client)

    if conversation_sid:
        st.success("βœ… WhatsApp connected. Initializing chatbot...")
        # Guard against Streamlit reruns spawning duplicate polling threads.
        if not st.session_state.get("monitor_started"):
            index, model, chunks = setup_knowledge_base()
            start_message_monitor(client, conversation_sid, index, model, chunks)
            st.session_state["monitor_started"] = True
        st.success("🟒 Chatbot is running in background and will reply automatically.")
    else:
        st.error("❌ No WhatsApp conversation found.")
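
# To run locally (credentials can live in .streamlit/secrets.toml or be entered
# in the UI above):
#   streamlit run app.py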