masadonline committed
Commit 2699d0b · verified · 1 Parent(s): 9719b16

Update app.py

Files changed (1):
  1. app.py +199 -126
app.py CHANGED
@@ -1,140 +1,213 @@
- import streamlit as st
  import os
- import tempfile
- import requests
- from PyPDF2 import PdfReader
  import faiss
  import numpy as np
- from transformers import AutoTokenizer
- import pdfplumber
  from groq import Groq
- import time
- import json
- from twilio.rest import Client
- from flask import Flask, request
- import threading

- # --- Configuration ---
- GROQ_API_KEY = os.getenv("GROQ_API_KEY")
- TWILIO_ACCOUNT_SID = os.getenv("TWILIO_ACCOUNT_SID")
- TWILIO_AUTH_TOKEN = os.getenv("TWILIO_AUTH_TOKEN")
- TWILIO_SERVICE_SID = os.getenv("TWILIO_SERVICE_SID")
-
- # --- Initialize Clients ---
- client = Groq(api_key=GROQ_API_KEY)
- twilio_client = Client(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)
- tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
-
- # --- Flask App for Webhook ---
- flask_app = Flask(__name__)
- latest_conversation_sid = None
- conversation_index = None
- chunk_store = {}
-
- # --- Helper Functions ---
- def extract_text_with_pdfplumber(pdf_path):
-     full_text = ""
-     with pdfplumber.open(pdf_path) as pdf:
-         for page in pdf.pages:
-             text = page.extract_text()
-             if text:
-                 full_text += text + "\n"
-     return full_text
-
- def chunk_text(text, chunk_size=256, chunk_overlap=64, max_tokens=512):
      tokens = tokenizer.tokenize(text)
      chunks = []
      start = 0
-     while start < len(tokens):
-         end = min(start + chunk_size, len(tokens))
-         chunk_tokens = tokens[start:end]
-         chunk_text = tokenizer.convert_tokens_to_string(chunk_tokens)
-         if len(tokenizer.encode(chunk_text)) <= max_tokens:
-             chunks.append(chunk_text.strip())
          start += chunk_size - chunk_overlap
      return chunks

- def get_embedding(text):
-     # Fake embedding for example purposes
-     return np.random.rand(384).astype("float32")
-
- def build_faiss_index(chunks):
-     embeddings = [get_embedding(chunk) for chunk in chunks]
-     dimension = embeddings[0].shape[0]
-     index = faiss.IndexFlatL2(dimension)
-     index.add(np.array(embeddings))
-     return index, chunks
-
- def search_chunks(query, index, chunks, top_k=5):
-     query_embedding = get_embedding(query)
-     _, I = index.search(np.array([query_embedding]), top_k)
-     return [chunks[i] for i in I[0]]
-
- def generate_answer_with_groq(query, context):
-     prompt = f"""
-     You are a helpful assistant. Use the following context to answer the question:
-     {context}
-     Question: {query}
-     Answer:
-     """
-     response = client.chat.completions.create(
-         messages=[{"role": "user", "content": prompt}],
-         model="llama3-8b-8192"
      )
-     return response.choices[0].message.content.strip()
-
- # --- Flask Webhook Endpoint ---
- @flask_app.route("/twilio-webhook", methods=['POST'])
- def whatsapp_webhook():
-     global conversation_index, chunk_store
-     data = request.form
-     from_number = data['From']
-     message = data['Body']
-     conversation_sid = data.get('ConversationSid')
-
-     if conversation_sid and conversation_index:
-         context_chunks = search_chunks(message, conversation_index, chunk_store[conversation_sid])
-         context = "\n".join(context_chunks)
-         answer = generate_answer_with_groq(message, context)
-         twilio_client.conversations.conversations(conversation_sid).messages.create(author="bot", body=answer)
-     return "OK"

  # --- Streamlit UI ---
- def streamlit_ui():
-     global latest_conversation_sid, conversation_index, chunk_store
-     st.title("Quasa – Smart WhatsApp Chatbot")
-
-     uploaded_file = st.file_uploader("Upload a PDF document", type="pdf")
-     if uploaded_file is not None:
-         with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp_file:
-             tmp_file.write(uploaded_file.read())
-             pdf_path = tmp_file.name
-
-         text = extract_text_with_pdfplumber(pdf_path)
-         chunks = chunk_text(text)
-         index, stored_chunks = build_faiss_index(chunks)
-
-         # Get the last conversation SID from Twilio
-         conversations = twilio_client.conversations.conversations.list(limit=1)
-         if conversations:
-             latest_conversation_sid = conversations[0].sid
-             conversation_index = index
-             chunk_store[latest_conversation_sid] = stored_chunks
-             st.success(f"PDF indexed and ready for conversation. Conversation SID: {latest_conversation_sid}")
-         else:
-             st.error("No active Twilio conversations found.")
-
-     st.markdown("---")
-     st.markdown("**Waiting for incoming WhatsApp messages...**")
-
- # --- Run Flask in Background Thread ---
- def run_flask():
-     flask_app.run(host="0.0.0.0", port=5000)
-
- flask_thread = threading.Thread(target=run_flask)
- flask_thread.daemon = True
- flask_thread.start()
-
- # --- Start Streamlit App ---
- if __name__ == "__main__":
-     streamlit_ui()
  import os
+ import time
+ import threading
+ import streamlit as st
+ from twilio.rest import Client
+ from sentence_transformers import SentenceTransformer
+ from transformers import AutoTokenizer
+
  import faiss
  import numpy as np
+ import docx
+
  from groq import Groq
+ import requests
+ from io import StringIO
+ from pdfminer.high_level import extract_text_to_fp
+ from pdfminer.layout import LAParams
+
+ # --- PDF Extraction (Improved for Tables & Paragraphs) ---
+ def extract_text_from_pdf(pdf_path):
+     output_string = StringIO()
+     with open(pdf_path, 'rb') as file:
+         extract_text_to_fp(file, output_string, laparams=LAParams(), output_type='text', codec=None)
+     return output_string.getvalue()
+
+ def clean_extracted_text(text):
+     lines = text.splitlines()
+     cleaned = []
+     for line in lines:
+         line = line.strip()
+         if line:
+             line = ' '.join(line.split())  # remove extra spaces
+             cleaned.append(line)
+     return '\n'.join(cleaned)
+
+ # --- DOCX Extraction ---
+ def extract_text_from_docx(docx_path):
+     try:
+         doc = docx.Document(docx_path)
+         return '\n'.join(para.text for para in doc.paragraphs)
+     except:
+         return ""
+
+ # --- Chunking & Retrieval ---
+ def chunk_text(text, tokenizer, chunk_size=128, chunk_overlap=32, max_tokens=512):
+
      tokens = tokenizer.tokenize(text)
      chunks = []
      start = 0
          start += chunk_size - chunk_overlap
      return chunks
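
Note: the rendered diff shows only the setup and the last two lines of the new chunk_text; the body of its sliding-window loop is not visible here. For reference, a minimal sketch of what such a token-window chunker typically looks like, reconstructed from the removed version and the new signature above. This is an assumption, not the committed code:

# Hypothetical reconstruction of the chunking loop -- not taken from this commit.
def chunk_text_sketch(text, tokenizer, chunk_size=128, chunk_overlap=32, max_tokens=512):
    tokens = tokenizer.tokenize(text)
    chunks = []
    start = 0
    while start < len(tokens):
        end = min(start + chunk_size, len(tokens))
        piece = tokenizer.convert_tokens_to_string(tokens[start:end])
        # Keep a window only if it still fits the downstream token budget.
        if len(tokenizer.encode(piece)) <= max_tokens:
            chunks.append(piece.strip())
        # Slide the window forward, keeping chunk_overlap tokens of overlap.
        start += chunk_size - chunk_overlap
    return chunks
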

+ def retrieve_chunks(question, index, embed_model, text_chunks, k=3):
+     question_embedding = embed_model.encode(question)
+     D, I = index.search(np.array([question_embedding]), k)
+     return [text_chunks[i] for i in I[0]]
+
+ # --- Groq Answer Generator ---
+ def generate_answer_with_groq(question, context):
+     url = "https://api.groq.com/openai/v1/chat/completions"
+     api_key = os.environ.get("GROQ_API_KEY")
+     headers = {
+         "Authorization": f"Bearer {api_key}",
+         "Content-Type": "application/json",
+     }
+     prompt = (
+         f"Customer asked: '{question}'\n\n"
+         f"Here is the relevant product or policy info to help:\n{context}\n\n"
+         f"Respond in a friendly and helpful tone as a toy shop support agent."
      )
+     payload = {
+         "model": "llama3-8b-8192",
+         "messages": [
+             {
+                 "role": "system",
+                 "content": (
+                     "You are ToyBot, a friendly and helpful WhatsApp assistant for an online toy shop. "
+                     "Your goal is to politely answer customer questions, help them choose the right toys, "
+                     "provide order or delivery information, explain return policies, and guide them through purchases."
+                 )
+             },
+             {"role": "user", "content": prompt},
+         ],
+         "temperature": 0.5,
+         "max_tokens": 300,
+     }
+     response = requests.post(url, headers=headers, json=payload)
+     response.raise_for_status()
+     return response.json()['choices'][0]['message']['content'].strip()
+
+ # --- Twilio Functions ---
+ def get_whatsapp_conversation_sids(client):
+     sids = []
+     conversations = client.conversations.v1.conversations.list(limit=50)
+     for convo in conversations:
+         try:
+             participants = client.conversations.v1.conversations(convo.sid).participants.list()
+             for p in participants:
+                 if (p.identity and p.identity.startswith("whatsapp:")) or (
+                     p.messaging_binding and p.messaging_binding.get("address", "").startswith("whatsapp:")
+                 ):
+                     sids.append(convo.sid)
+                     break
+         except:
+             continue
+     return sids
+
+ def fetch_latest_incoming_message(client, conversation_sid):
+     messages = client.conversations.v1.conversations(conversation_sid).messages.list(limit=10)
+     for msg in reversed(messages):
+         if msg.author.startswith("whatsapp:"):
+             return {
+                 "sid": msg.sid,
+                 "body": msg.body,
+                 "author": msg.author,
+                 "timestamp": msg.date_created,
+             }
+     return None
+
+ def send_twilio_message(client, conversation_sid, body):
+     return client.conversations.v1.conversations(conversation_sid).messages.create(
+         author="system", body=body
+     )
+
+ # --- Load Knowledge Base ---
127
+ def setup_knowledge_base():
128
+ folder_path = "docs"
129
+ all_text = ""
130
+ for file in os.listdir(folder_path):
131
+ path = os.path.join(folder_path, file)
132
+ if file.endswith(".pdf"):
133
+ raw_text = extract_text_from_pdf(path)
134
+ all_text += clean_extracted_text(raw_text) + "\n"
135
+ elif file.endswith((".docx", ".doc")):
136
+ all_text += extract_text_from_docx(path) + "\n"
137
+
138
+ tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
139
+ chunks = chunk_text(all_text, tokenizer)
140
+ model = SentenceTransformer('all-mpnet-base-v2')
141
+ embeddings = model.encode(chunks, truncate=True, show_progress_bar=False)
142
+ dim = embeddings[0].shape[0]
143
+ index = faiss.IndexFlatL2(dim)
144
+ index.add(np.array(embeddings).astype('float32'))
145
+ return index, model, chunks
146
+
147
+ # --- Monitor Conversations ---
148
+ def start_conversation_monitor(client, index, embed_model, text_chunks):
149
+ monitored_sids = set()
150
+
151
+ def poll_conversation(convo_sid):
152
+ last_processed_timestamp = None
153
+ while True:
154
+ try:
155
+ latest_msg = fetch_latest_incoming_message(client, convo_sid)
156
+ if latest_msg:
157
+ msg_time = latest_msg["timestamp"]
158
+ if last_processed_timestamp is None or msg_time > last_processed_timestamp:
159
+ last_processed_timestamp = msg_time
160
+ question = latest_msg["body"]
161
+ sender = latest_msg["author"]
162
+ print(f"\nπŸ“₯ New message from {sender} in {convo_sid}: {question}")
163
+ context = "\n\n".join(retrieve_chunks(question, index, embed_model, text_chunks))
164
+ answer = generate_answer_with_groq(question, context)
165
+ send_twilio_message(client, convo_sid, answer)
166
+ print(f"πŸ“€ Replied to {sender}: {answer}")
167
+ time.sleep(3)
168
+ except Exception as e:
169
+ print(f"❌ Error in convo {convo_sid} polling:", e)
170
+ time.sleep(5)
171
+
172
+ def monitor_all_conversations():
173
+ while True:
174
+ try:
175
+ current_sids = set(get_whatsapp_conversation_sids(client))
176
+ new_sids = current_sids - monitored_sids
177
+ for sid in new_sids:
178
+ print(f"➑️ Monitoring new conversation: {sid}")
179
+ monitored_sids.add(sid)
180
+ threading.Thread(target=poll_conversation, args=(sid,), daemon=True).start()
181
+ time.sleep(15)
182
+ except Exception as e:
183
+ print("❌ Error in conversation monitoring loop:", e)
184
+ time.sleep(15)
185
+
186
+ threading.Thread(target=monitor_all_conversations, daemon=True).start()
187
 
  # --- Streamlit UI ---
+ st.set_page_config(page_title="Quasa – A Smart WhatsApp Chatbot", layout="wide")
+ st.title("📱 Quasa – A Smart WhatsApp Chatbot")
+
+ account_sid = st.secrets.get("TWILIO_SID")
+ auth_token = st.secrets.get("TWILIO_TOKEN")
+ GROQ_API_KEY = st.secrets.get("GROQ_API_KEY")
+
+ if not all([account_sid, auth_token, GROQ_API_KEY]):
+     st.warning("⚠️ Provide all credentials below:")
+     account_sid = st.text_input("Twilio SID", value=account_sid or "")
+     auth_token = st.text_input("Twilio Token", type="password", value=auth_token or "")
+     GROQ_API_KEY = st.text_input("GROQ API Key", type="password", value=GROQ_API_KEY or "")
+
+ if all([account_sid, auth_token, GROQ_API_KEY]):
+     os.environ["GROQ_API_KEY"] = GROQ_API_KEY
+     client = Client(account_sid, auth_token)
+     conversation_sids = get_whatsapp_conversation_sids(client)
+
+     if conversation_sids:
+         st.success(f"✅ {len(conversation_sids)} WhatsApp conversation(s) found. Initializing chatbot...")
+         index, model, chunks = setup_knowledge_base()
+         start_conversation_monitor(client, index, model, chunks)
+         st.success("🟢 Chatbot is running in background and will reply to new messages.")
+     else:
+         st.error("❌ No WhatsApp conversations found.")
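
Usage note: the retrieval-and-reply pipeline in this commit can be exercised without Twilio or the Streamlit UI. A minimal smoke-test sketch, assuming the helper functions were factored into a hypothetical rag_utils.py module (importing app.py directly would also execute the Streamlit top-level code), that GROQ_API_KEY is set in the environment, and that a docs/ folder with at least one PDF or DOCX exists:

# smoke_test.py -- hypothetical local check, not part of this commit.
from rag_utils import setup_knowledge_base, retrieve_chunks, generate_answer_with_groq

# Build the FAISS index from ./docs, retrieve context for one test question,
# and print the Groq-generated reply.
index, embed_model, chunks = setup_knowledge_base()
question = "Do you ship wooden puzzles internationally?"  # placeholder question
context = "\n\n".join(retrieve_chunks(question, index, embed_model, chunks))
print(generate_answer_with_groq(question, context))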