masadonline committed on
Commit f91ccef · verified · 1 Parent(s): ce4e9d7

Update app.py

Files changed (1)
  1. app.py  +121 -196
app.py CHANGED
@@ -1,46 +1,47 @@
- import os
- import time
- import threading
  import streamlit as st
- from twilio.rest import Client
- from sentence_transformers import SentenceTransformer
- from transformers import AutoTokenizer
  import faiss
  import numpy as np
- import docx
  from groq import Groq
- import requests
- from io import StringIO
- from pdfminer.high_level import extract_text_to_fp
- from pdfminer.layout import LAParams
-
- # --- PDF Extraction (Improved for Tables & Paragraphs) ---
- def extract_text_from_pdf(pdf_path):
-     output_string = StringIO()
-     with open(pdf_path, 'rb') as file:
-         extract_text_to_fp(file, output_string, laparams=LAParams(), output_type='text', codec=None)
-     return output_string.getvalue()
-
- def clean_extracted_text(text):
-     lines = text.splitlines()
-     cleaned = []
-     for line in lines:
-         line = line.strip()
-         if line:
-             line = ' '.join(line.split())  # remove extra spaces
-             cleaned.append(line)
-     return '\n'.join(cleaned)
-
- # --- DOCX Extraction ---
- def extract_text_from_docx(docx_path):
-     try:
-         doc = docx.Document(docx_path)
-         return '\n'.join(para.text for para in doc.paragraphs)
-     except:
-         return ""
-
- # --- Chunking & Retrieval ---
- def chunk_text(text, tokenizer, chunk_size=128, chunk_overlap=32, max_tokens=512):
      tokens = tokenizer.tokenize(text)
      chunks = []
      start = 0
@@ -53,163 +54,87 @@ def chunk_text(text, tokenizer, chunk_size=128, chunk_overlap=32, max_tokens=512
          start += chunk_size - chunk_overlap
      return chunks

- def retrieve_chunks(question, index, embed_model, text_chunks, k=3):
-     question_embedding = embed_model.encode(question)
-     D, I = index.search(np.array([question_embedding]), k)
-     return [text_chunks[i] for i in I[0]]
-
- # --- Groq Answer Generator ---
- def generate_answer_with_groq(question, context):
-     url = "https://api.groq.com/openai/v1/chat/completions"
-     api_key = os.environ.get("GROQ_API_KEY")
-     headers = {
-         "Authorization": f"Bearer {api_key}",
-         "Content-Type": "application/json",
-     }
-     prompt = (
-         f"Customer asked: '{question}'\n\n"
-         f"Here is the relevant product or policy info to help:\n{context}\n\n"
-         f"Respond in a friendly and helpful tone as a toy shop support agent."
-     )
-     payload = {
-         "model": "llama3-8b-8192",
-         "messages": [
-             {
-                 "role": "system",
-                 "content": (
-                     "You are ToyBot, a friendly and helpful WhatsApp assistant for an online toy shop. "
-                     "Your goal is to politely answer customer questions, help them choose the right toys, "
-                     "provide order or delivery information, explain return policies, and guide them through purchases."
-                 )
-             },
-             {"role": "user", "content": prompt},
-         ],
-         "temperature": 0.5,
-         "max_tokens": 300,
-     }
-     response = requests.post(url, headers=headers, json=payload)
-     response.raise_for_status()
-     return response.json()['choices'][0]['message']['content'].strip()
-
- # --- Twilio Functions ---
- def get_whatsapp_conversation_sids(client):
-     sids = []
-     conversations = client.conversations.v1.conversations.list(limit=50)
-     for convo in conversations:
-         try:
-             participants = client.conversations.v1.conversations(convo.sid).participants.list()
-             for p in participants:
-                 if (p.identity and p.identity.startswith("whatsapp:")) or (
-                     p.messaging_binding and p.messaging_binding.get("address", "").startswith("whatsapp:")
-                 ):
-                     sids.append(convo.sid)
-                     break
-         except:
-             continue
-     return sids
-
- def fetch_latest_incoming_message(client, conversation_sid):
-     messages = client.conversations.v1.conversations(conversation_sid).messages.list(limit=10)
-     for msg in reversed(messages):
-         if msg.author.startswith("whatsapp:"):
-             return {
-                 "sid": msg.sid,
-                 "body": msg.body,
-                 "author": msg.author,
-                 "timestamp": msg.date_created,
-             }
-     return None
-
- def send_twilio_message(client, conversation_sid, body):
-     return client.conversations.v1.conversations(conversation_sid).messages.create(
-         author="system", body=body
      )
-
- # --- Load Knowledge Base ---
- def setup_knowledge_base():
-     folder_path = "docs"
-     all_text = ""
-     for file in os.listdir(folder_path):
-         path = os.path.join(folder_path, file)
-         if file.endswith(".pdf"):
-             raw_text = extract_text_from_pdf(path)
-             all_text += clean_extracted_text(raw_text) + "\n"
-         elif file.endswith((".docx", ".doc")):
-             all_text += extract_text_from_docx(path) + "\n"
-
-     tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
-     chunks = chunk_text(all_text, tokenizer)
-     model = SentenceTransformer('all-mpnet-base-v2')
-     embeddings = model.encode(chunks, truncate=True, show_progress_bar=False)
-     dim = embeddings[0].shape[0]
-     index = faiss.IndexFlatL2(dim)
-     index.add(np.array(embeddings).astype('float32'))
-     return index, model, chunks
-
- # --- Monitor Conversations ---
- def start_conversation_monitor(client, index, embed_model, text_chunks):
-     monitored_sids = set()
-
-     def poll_conversation(convo_sid):
-         last_processed_timestamp = None
-         while True:
-             try:
-                 latest_msg = fetch_latest_incoming_message(client, convo_sid)
-                 if latest_msg:
-                     msg_time = latest_msg["timestamp"]
-                     if last_processed_timestamp is None or msg_time > last_processed_timestamp:
-                         last_processed_timestamp = msg_time
-                         question = latest_msg["body"]
-                         sender = latest_msg["author"]
-                         print(f"\n📥 New message from {sender} in {convo_sid}: {question}")
-                         context = "\n\n".join(retrieve_chunks(question, index, embed_model, text_chunks))
-                         answer = generate_answer_with_groq(question, context)
-                         send_twilio_message(client, convo_sid, answer)
-                         print(f"📤 Replied to {sender}: {answer}")
-                 time.sleep(3)
-             except Exception as e:
-                 print(f"❌ Error in convo {convo_sid} polling:", e)
-                 time.sleep(5)
-
-     def monitor_all_conversations():
-         while True:
-             try:
-                 current_sids = set(get_whatsapp_conversation_sids(client))
-                 new_sids = current_sids - monitored_sids
-                 for sid in new_sids:
-                     print(f"➡️ Monitoring new conversation: {sid}")
-                     monitored_sids.add(sid)
-                     threading.Thread(target=poll_conversation, args=(sid,), daemon=True).start()
-                 time.sleep(15)
-             except Exception as e:
-                 print("❌ Error in conversation monitoring loop:", e)
-                 time.sleep(15)
-
-     threading.Thread(target=monitor_all_conversations, daemon=True).start()

  # --- Streamlit UI ---
- st.set_page_config(page_title="Quasa – A Smart WhatsApp Chatbot", layout="wide")
- st.title("📱 Quasa A Smart WhatsApp Chatbot")
-
- account_sid = st.secrets.get("TWILIO_SID")
- auth_token = st.secrets.get("TWILIO_TOKEN")
- GROQ_API_KEY = st.secrets.get("GROQ_API_KEY")
-
- if not all([account_sid, auth_token, GROQ_API_KEY]):
-     st.warning("⚠️ Provide all credentials below:")
-     account_sid = st.text_input("Twilio SID", value=account_sid or "")
-     auth_token = st.text_input("Twilio Token", type="password", value=auth_token or "")
-     GROQ_API_KEY = st.text_input("GROQ API Key", type="password", value=GROQ_API_KEY or "")
-
- if all([account_sid, auth_token, GROQ_API_KEY]):
-     os.environ["GROQ_API_KEY"] = GROQ_API_KEY
-     client = Client(account_sid, auth_token)
-     conversation_sids = get_whatsapp_conversation_sids(client)
-
-     if conversation_sids:
-         st.success(f"✅ {len(conversation_sids)} WhatsApp conversation(s) found. Initializing chatbot...")
-         index, model, chunks = setup_knowledge_base()
-         start_conversation_monitor(client, index, model, chunks)
-         st.success("🟢 Chatbot is running in background and will reply to new messages.")
-     else:
-         st.error("❌ No WhatsApp conversations found.")

  import streamlit as st
+ import os
+ import tempfile
+ import requests
+ from PyPDF2 import PdfReader
  import faiss
  import numpy as np
+ from transformers import AutoTokenizer
+ import pdfplumber
  from groq import Groq
+ import time
+ import json
+ from twilio.rest import Client
+ from flask import Flask, request
+ import threading
+
+ # --- Configuration ---
+ GROQ_API_KEY = os.getenv("GROQ_API_KEY")
+ TWILIO_ACCOUNT_SID = os.getenv("TWILIO_ACCOUNT_SID")
+ TWILIO_AUTH_TOKEN = os.getenv("TWILIO_AUTH_TOKEN")
+ TWILIO_SERVICE_SID = os.getenv("TWILIO_SERVICE_SID")
+
+ # --- Initialize Clients ---
+ client = Groq(api_key=GROQ_API_KEY)
+ twilio_client = Client(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)
+ tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+
+ # --- Flask App for Webhook ---
+ flask_app = Flask(__name__)
+ latest_conversation_sid = None
+ conversation_index = None
+ chunk_store = {}
+
+ # --- Helper Functions ---
+ def extract_text_with_pdfplumber(pdf_path):
+     full_text = ""
+     with pdfplumber.open(pdf_path) as pdf:
+         for page in pdf.pages:
+             text = page.extract_text()
+             if text:
+                 full_text += text + "\n"
+     return full_text
+
+ def chunk_text(text, chunk_size=256, chunk_overlap=64, max_tokens=512):
      tokens = tokenizer.tokenize(text)
      chunks = []
      start = 0

          start += chunk_size - chunk_overlap
      return chunks

+ def get_embedding(text):
+     # Fake embedding for example purposes
+     return np.random.rand(384).astype("float32")
+
+ def build_faiss_index(chunks):
+     embeddings = [get_embedding(chunk) for chunk in chunks]
+     dimension = embeddings[0].shape[0]
+     index = faiss.IndexFlatL2(dimension)
+     index.add(np.array(embeddings))
+     return index, chunks
+
+ def search_chunks(query, index, chunks, top_k=5):
+     query_embedding = get_embedding(query)
+     _, I = index.search(np.array([query_embedding]), top_k)
+     return [chunks[i] for i in I[0]]
+
+ def generate_answer_with_groq(query, context):
+     prompt = f"""
+     You are a helpful assistant. Use the following context to answer the question:
+     {context}
+     Question: {query}
+     Answer:
+     """
+     response = client.chat.completions.create(
+         messages=[{"role": "user", "content": prompt}],
+         model="llama3-8b-8192"
      )
+     return response.choices[0].message.content.strip()
+
+ # --- Flask Webhook Endpoint ---
+ @flask_app.route("/twilio-webhook", methods=['POST'])
+ def whatsapp_webhook():
+     global conversation_index, chunk_store
+     data = request.form
+     from_number = data['From']
+     message = data['Body']
+     conversation_sid = data.get('ConversationSid')
+
+     if conversation_sid and conversation_index:
+         context_chunks = search_chunks(message, conversation_index, chunk_store[conversation_sid])
+         context = "\n".join(context_chunks)
+         answer = generate_answer_with_groq(message, context)
+         twilio_client.conversations.conversations(conversation_sid).messages.create(author="bot", body=answer)
+     return "OK"

  # --- Streamlit UI ---
+ def streamlit_ui():
+     global latest_conversation_sid, conversation_index, chunk_store
+     st.title("Quasa – Smart WhatsApp Chatbot")
+
+     uploaded_file = st.file_uploader("Upload a PDF document", type="pdf")
+     if uploaded_file is not None:
+         with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp_file:
+             tmp_file.write(uploaded_file.read())
+             pdf_path = tmp_file.name
+
+         text = extract_text_with_pdfplumber(pdf_path)
+         chunks = chunk_text(text)
+         index, stored_chunks = build_faiss_index(chunks)
+
+         # Get the last conversation SID from Twilio
+         conversations = twilio_client.conversations.conversations.list(limit=1)
+         if conversations:
+             latest_conversation_sid = conversations[0].sid
+             conversation_index = index
+             chunk_store[latest_conversation_sid] = stored_chunks
+             st.success(f"PDF indexed and ready for conversation. Conversation SID: {latest_conversation_sid}")
+         else:
+             st.error("No active Twilio conversations found.")
+
+     st.markdown("---")
+     st.markdown("**Waiting for incoming WhatsApp messages...**")
+
+ # --- Run Flask in Background Thread ---
+ def run_flask():
+     flask_app.run(host="0.0.0.0", port=5000)
+
+ flask_thread = threading.Thread(target=run_flask)
+ flask_thread.daemon = True
+ flask_thread.start()
+
+ # --- Start Streamlit App ---
+ if __name__ == "__main__":
+     streamlit_ui()