masadonline committed on
Commit
9590248
·
verified ·
1 Parent(s): b089ae9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +164 -106
app.py CHANGED
@@ -1,35 +1,38 @@
1
  import os
2
- import json
3
  import time
4
  import threading
5
- import datetime
6
- import csv
7
- import docx
8
  import streamlit as st
9
- from io import StringIO
 
 
 
10
  import numpy as np
 
 
11
  import requests
12
- import pdfplumber
13
  from pdfminer.high_level import extract_text_to_fp
14
  from pdfminer.layout import LAParams
15
- from sentence_transformers import SentenceTransformer
16
- from transformers import AutoTokenizer
17
- import faiss
18
- from twilio.rest import Client
19
  from twilio.base.exceptions import TwilioRestException
 
 
 
 
 
20
 
21
  APP_START_TIME = datetime.datetime.now(datetime.timezone.utc)
22
  os.environ["PYTORCH_JIT"] = "0"
23
 
24
- DOCS_FOLDER = "./docs"
25
-
26
- # ---------------- PDF / DOCX / JSON LOADERS ----------------
27
  def _extract_tables_from_page(page):
28
  tables = page.extract_tables()
29
  formatted_tables = []
30
  for table in tables:
31
- formatted_row = [[cell if cell else "" for cell in row] for row in table]
32
- formatted_tables.append(formatted_row)
 
 
 
33
  return formatted_tables
34
 
35
  def extract_text_from_pdf(pdf_path):
@@ -43,39 +46,55 @@ def extract_text_from_pdf(pdf_path):
43
  if text:
44
  text_output.write(text + "\n\n")
45
  except Exception as e:
46
- print(f"[pdfplumber error] Falling back: {e}")
47
  with open(pdf_path, 'rb') as file:
48
  extract_text_to_fp(file, text_output, laparams=LAParams(), output_type='text')
49
  return text_output.getvalue(), all_tables
50
 
 
 
 
 
 
 
 
 
 
 
 
 
51
  def extract_text_from_docx(docx_path):
52
  try:
53
  doc = docx.Document(docx_path)
54
- return "\n".join(para.text for para in doc.paragraphs)
55
- except Exception as e:
56
- print(f"[DOCX error] {e}")
57
  return ""
58
 
59
  def load_json_data(json_path):
60
  try:
61
  with open(json_path, 'r', encoding='utf-8') as f:
62
  data = json.load(f)
63
- if isinstance(data, list):
64
- return "\n\n".join("\n".join(f"{k}: {v}" for k, v in d.items()) for d in data if isinstance(d, dict))
65
  if isinstance(data, dict):
66
- return "\n".join(f"{k}: {v}" for k, v in data.items())
67
- return str(data)
 
 
 
 
 
 
 
 
 
68
  except Exception as e:
69
- print(f"[JSON error] {e}")
70
  return ""
71
 
72
- def clean_extracted_text(text):
73
- return '\n'.join(' '.join(line.strip().split()) for line in text.splitlines() if line.strip())
74
-
75
- # ---------------- CHUNKING ----------------
76
  def chunk_text(text, tokenizer, chunk_size=128, chunk_overlap=32):
77
  tokens = tokenizer.tokenize(text)
78
- chunks, start = [], 0
 
79
  while start < len(tokens):
80
  end = min(start + chunk_size, len(tokens))
81
  chunk = tokens[start:end]
@@ -87,9 +106,9 @@ def chunk_text(text, tokenizer, chunk_size=128, chunk_overlap=32):
87
  def retrieve_chunks(question, index, embed_model, text_chunks, k=3):
88
  q_embedding = embed_model.encode(question)
89
  D, I = index.search(np.array([q_embedding]), k)
90
- return [text_chunks[i] for i in I[0] if i < len(text_chunks)]
91
 
92
- # ---------------- GROQ ANSWER GENERATOR ----------------
93
  def generate_answer_with_groq(question, context):
94
  url = "https://api.groq.com/openai/v1/chat/completions"
95
  api_key = os.environ.get("GROQ_API_KEY")
@@ -97,92 +116,131 @@ def generate_answer_with_groq(question, context):
97
  "Authorization": f"Bearer {api_key}",
98
  "Content-Type": "application/json",
99
  }
100
-
101
  prompt = (
102
  f"Customer asked: '{question}'\n\n"
103
- f"Here is the relevant information:\n{context}\n\n"
104
- "Reply as a friendly toy shop assistant, include customer name from the context if possible."
 
105
  )
106
-
107
  payload = {
108
  "model": "llama3-8b-8192",
109
  "messages": [
110
  {
111
  "role": "system",
112
- "content": "You are ToyBot, a helpful assistant for an online toy shop."
 
 
 
 
 
 
113
  },
114
- {"role": "user", "content": prompt}
115
- ]
 
 
116
  }
117
-
118
  response = requests.post(url, headers=headers, json=payload)
119
- return response.json()["choices"][0]["message"]["content"]
120
-
121
- # ---------------- TWILIO MONITOR ----------------
122
- def handle_incoming_messages(index, embed_model, tokenizer, text_chunks):
123
- client = Client(os.environ["TWILIO_ACCOUNT_SID"], os.environ["TWILIO_AUTH_TOKEN"])
124
- conversation_sid = os.environ.get("TWILIO_CONVERSATION_SID")
125
-
126
- if not conversation_sid:
127
- print("❌ TWILIO_CONVERSATION_SID not set")
128
- return
129
-
130
- last_check_time = APP_START_TIME
131
-
132
- while True:
133
- try:
134
- messages = client.conversations.conversations(conversation_sid).messages.list(order="asc")
135
- for msg in messages:
136
- msg_time = msg.date_created.replace(tzinfo=datetime.timezone.utc)
137
- if msg_time > last_check_time:
138
- print(f"πŸ“© New message from {msg.author}: {msg.body}")
139
- answer = generate_answer_with_groq(msg.body, "\n".join(retrieve_chunks(msg.body, index, embed_model, text_chunks)))
140
- client.conversations.conversations(conversation_sid).messages.create(author="ToyBot", body=answer)
141
- last_check_time = datetime.datetime.now(datetime.timezone.utc)
142
- except TwilioRestException as e:
143
- print(f"[Twilio error] {e}")
144
- time.sleep(10)
145
-
146
- # ---------------- STREAMLIT UI ----------------
147
- st.title("🎁 ToyShop Assistant – RAG WhatsApp Bot")
148
-
149
- def load_all_documents(folder_path):
150
- full_text = ""
151
- all_tables = []
152
 
153
  for filename in os.listdir(folder_path):
154
- filepath = os.path.join(folder_path, filename)
155
- ext = filename.lower().split(".")[-1]
156
- if ext == "pdf":
157
- text, tables = extract_text_from_pdf(filepath)
158
- all_tables.extend(tables)
159
- elif ext == "docx":
160
- text = extract_text_from_docx(filepath)
161
- elif ext == "json":
162
- text = load_json_data(filepath)
163
- else:
 
 
164
  try:
165
- with open(filepath, "r", encoding="utf-8") as f:
166
- text = f.read()
167
- except Exception:
168
- continue
169
- full_text += clean_extracted_text(text) + "\n\n"
170
- return full_text, all_tables
171
-
172
- with st.spinner("Loading documents..."):
173
- full_text, tables = load_all_documents(DOCS_FOLDER)
174
-
175
- tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
176
- embed_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
177
- chunks = chunk_text(full_text, tokenizer)
178
- embeddings = embed_model.encode(chunks)
179
-
180
- index = faiss.IndexFlatL2(embeddings.shape[1])
181
- index.add(np.array(embeddings))
182
-
183
- if "listener_started" not in st.session_state:
184
- threading.Thread(target=handle_incoming_messages, args=(index, embed_model, tokenizer, chunks), daemon=True).start()
185
- st.session_state.listener_started = True
186
- st.success("βœ… WhatsApp listener started.")
187
-
188
- st.success(f"πŸ“š Loaded {len(chunks)} text chunks from docs/ folder")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
 
2
  import time
3
  import threading
 
 
 
4
  import streamlit as st
5
+ from twilio.rest import Client
6
+ from sentence_transformers import SentenceTransformer
7
+ from transformers import AutoTokenizer
8
+ import faiss
9
  import numpy as np
10
+ import docx
11
+ from groq import Groq
12
  import requests
13
+ from io import StringIO
14
  from pdfminer.high_level import extract_text_to_fp
15
  from pdfminer.layout import LAParams
 
 
 
 
16
  from twilio.base.exceptions import TwilioRestException
17
+ import pdfplumber
18
+ import datetime
19
+ import csv
20
+ import json
21
+ import re
22
 
23
  APP_START_TIME = datetime.datetime.now(datetime.timezone.utc)
24
  os.environ["PYTORCH_JIT"] = "0"
25
 
26
+ # ---------------- PDF & DOCX & JSON Extraction ----------------
 
 
27
  def _extract_tables_from_page(page):
28
  tables = page.extract_tables()
29
  formatted_tables = []
30
  for table in tables:
31
+ formatted_table = []
32
+ for row in table:
33
+ formatted_row = [cell if cell is not None else "" for cell in row]
34
+ formatted_table.append(formatted_row)
35
+ formatted_tables.append(formatted_table)
36
  return formatted_tables
37
 
38
  def extract_text_from_pdf(pdf_path):
 
46
  if text:
47
  text_output.write(text + "\n\n")
48
  except Exception as e:
49
+ print(f"pdfplumber error: {e}")
50
  with open(pdf_path, 'rb') as file:
51
  extract_text_to_fp(file, text_output, laparams=LAParams(), output_type='text')
52
  return text_output.getvalue(), all_tables
53
 
54
+ def _format_tables_internal(tables):
55
+ formatted_tables_str = []
56
+ for table in tables:
57
+ with StringIO() as csvfile:
58
+ writer = csv.writer(csvfile)
59
+ writer.writerows(table)
60
+ formatted_tables_str.append(csvfile.getvalue())
61
+ return "\n\n".join(formatted_tables_str)
62
+
63
def clean_extracted_text(text):
    """Collapse runs of whitespace within each line and drop blank lines."""
    cleaned_lines = []
    for raw_line in text.splitlines():
        words = raw_line.split()
        if words:
            cleaned_lines.append(" ".join(words))
    return "\n".join(cleaned_lines)
65
+
66
def extract_text_from_docx(docx_path):
    """Return all paragraph text from a .docx file, or "" if it cannot be read.

    Failures (missing file, corrupt archive, ...) are logged and swallowed so
    that one bad document does not abort the whole knowledge-base build.
    """
    try:
        doc = docx.Document(docx_path)
        return '\n'.join(para.text for para in doc.paragraphs)
    except Exception as e:
        # A bare `except:` here would also trap SystemExit/KeyboardInterrupt
        # and hid the failure completely; log it instead.
        print(f"DOCX read error: {e}")
        return ""
72
 
73
def load_json_data(json_path):
    """Flatten a JSON file into plain text for indexing.

    A dict becomes "key: value" lines (nested dict/list values are skipped),
    a list becomes blank-line-separated records built from its dict items,
    and any other JSON value is pretty-printed. Returns "" when the file
    cannot be read or parsed.
    """
    def _flat(record):
        # One "key: value" line per scalar entry; skip nested containers.
        return "\n".join(
            f"{key}: {value}"
            for key, value in record.items()
            if not isinstance(value, (dict, list))
        )

    try:
        with open(json_path, 'r', encoding='utf-8') as f:
            data = json.load(f)
        if isinstance(data, dict):
            return _flat(data)
        if isinstance(data, list):
            return "\n\n".join(_flat(item) for item in data if isinstance(item, dict))
        return json.dumps(data, ensure_ascii=False, indent=2)
    except Exception as e:
        print(f"JSON read error: {e}")
        return ""
92
 
93
+ # ---------------- Chunking ----------------
 
 
 
94
  def chunk_text(text, tokenizer, chunk_size=128, chunk_overlap=32):
95
  tokens = tokenizer.tokenize(text)
96
+ chunks = []
97
+ start = 0
98
  while start < len(tokens):
99
  end = min(start + chunk_size, len(tokens))
100
  chunk = tokens[start:end]
 
106
def retrieve_chunks(question, index, embed_model, text_chunks, k=3):
    """Return up to `k` text chunks most similar to `question`.

    Encodes the question with `embed_model` and runs an L2 nearest-neighbour
    search on the FAISS `index`. FAISS pads its result with index -1 when the
    index holds fewer than `k` vectors; without a bounds check those sentinels
    would silently alias text_chunks[-1] (the last chunk), so filter them out.
    """
    q_embedding = embed_model.encode(question)
    D, I = index.search(np.array([q_embedding]), k)
    return [text_chunks[i] for i in I[0] if 0 <= i < len(text_chunks)]
110
 
111
+ # ---------------- Groq Answer Generator ----------------
112
def generate_answer_with_groq(question, context):
    """Ask the Groq chat-completions API to answer `question` using `context`.

    Returns the assistant's reply text (stripped). Raises requests.HTTPError
    on a non-2xx response and KeyError/IndexError if the response body does
    not have the expected OpenAI-style shape. Reads GROQ_API_KEY from the
    environment.
    """
    url = "https://api.groq.com/openai/v1/chat/completions"
    api_key = os.environ.get("GROQ_API_KEY")
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
    prompt = (
        f"Customer asked: '{question}'\n\n"
        f"Here is the relevant information to help:\n{context}\n\n"
        f"Respond in a friendly and helpful tone as a toy shop support agent, "
        f"addressing the customer by their name if it's available in the context."
    )
    payload = {
        "model": "llama3-8b-8192",
        "messages": [
            {
                "role": "system",
                "content": (
                    "You are ToyBot, a friendly WhatsApp assistant for an online toy shop. "
                    "Help customers with toys, delivery, and returns in a helpful tone. "
                    "When responding, try to find the customer's name in the provided context "
                    "and address them directly. If the context contains order details and status, "
                    "include that information in your response."
                )
            },
            {"role": "user", "content": prompt},
        ],
        "temperature": 0.5,
        "max_tokens": 300,
    }
    # Timeout so a hung Groq call cannot stall a polling thread forever.
    response = requests.post(url, headers=headers, json=payload, timeout=30)
    response.raise_for_status()
    return response.json()['choices'][0]['message']['content'].strip()
146
+
147
+ # ---------------- Twilio Integration ----------------
148
def fetch_latest_incoming_message(client, conversation_sid):
    """Return the newest WhatsApp-authored message in a conversation, or None.

    Scans the conversation's messages newest-first and returns a small dict
    (sid/body/author/timestamp) for the first one whose author is a
    "whatsapp:" address. Returns None when there is no such message or when
    the Twilio API call fails.
    """
    try:
        messages = client.conversations.v1.conversations(conversation_sid).messages.list()
        for msg in reversed(messages):
            # author can be None (or "system") for bot/system messages;
            # calling .startswith on None would raise AttributeError.
            if msg.author and msg.author.startswith("whatsapp:"):
                return {
                    "sid": msg.sid,
                    "body": msg.body,
                    "author": msg.author,
                    "timestamp": msg.date_created,
                }
    except TwilioRestException as e:
        print(f"Twilio error: {e}")
    return None
162
+
163
def send_twilio_message(client, conversation_sid, body):
    """Post `body` into the given Twilio conversation, authored as "system"."""
    conversation = client.conversations.v1.conversations(conversation_sid)
    return conversation.messages.create(author="system", body=body)
167
+
168
+ # ---------------- Knowledge Base Setup ----------------
169
def setup_knowledge_base():
    """Build the RAG knowledge base from every supported file in ./docs.

    Reads .pdf (text + tables), .docx, .json and .csv files, concatenates the
    cleaned text, chunks it with a BERT tokenizer, embeds the chunks with a
    SentenceTransformer and stores them in a FAISS L2 index. Files with other
    extensions are intentionally ignored.

    Returns:
        (index, model, chunks): the FAISS index, the embedding model used to
        encode queries, and the list of text chunks aligned with index rows.

    Raises:
        RuntimeError: if no text could be extracted — an empty corpus would
        otherwise crash opaquely inside the embedding step
        (`embeddings[0]` on an empty result).
    """
    folder_path = "docs"
    all_text = ""

    for filename in os.listdir(folder_path):
        file_path = os.path.join(folder_path, filename)
        if filename.endswith(".pdf"):
            text, tables = extract_text_from_pdf(file_path)
            all_text += clean_extracted_text(text) + "\n"
            all_text += _format_tables_internal(tables) + "\n"
        elif filename.endswith(".docx"):
            text = extract_text_from_docx(file_path)
            all_text += clean_extracted_text(text) + "\n"
        elif filename.endswith(".json"):
            all_text += load_json_data(file_path) + "\n"
        elif filename.endswith(".csv"):
            try:
                with open(file_path, newline='', encoding='utf-8') as csvfile:
                    reader = csv.DictReader(csvfile)
                    for row in reader:
                        line = ' | '.join(f"{k}: {v}" for k, v in row.items())
                        all_text += line + "\n"
            except Exception as e:
                print(f"CSV read error: {e}")

    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    chunks = chunk_text(all_text, tokenizer)
    if not chunks:
        raise RuntimeError("No text extracted from the docs/ folder; cannot build index.")

    model = SentenceTransformer('all-mpnet-base-v2')
    embeddings = np.array(model.encode(chunks, show_progress_bar=False)).astype('float32')
    # Dimensionality comes from the 2-D array shape; indexing embeddings[0]
    # would raise IndexError on an empty corpus.
    index = faiss.IndexFlatL2(embeddings.shape[1])
    index.add(embeddings)
    return index, model, chunks
203
+
204
+ # ---------------- Monitor Twilio Conversations ----------------
205
def start_conversation_monitor(client, index, embed_model, text_chunks):
    """Spawn one daemon thread per existing Twilio conversation; each thread
    polls for new inbound WhatsApp messages and replies with a RAG answer.

    NOTE(review): only conversations that exist at call time are monitored —
    conversations created afterwards are never picked up; confirm this is
    acceptable for the deployment.
    """
    processed_convos = set()
    # Per-conversation timestamp of the last message answered; shared across
    # poller threads (each thread only touches its own conversation's key).
    last_processed_timestamp = {}

    def poll_convo(convo_sid):
        while True:
            try:
                latest_msg = fetch_latest_incoming_message(client, convo_sid)
                if latest_msg:
                    msg_time = latest_msg["timestamp"]
                    if convo_sid not in last_processed_timestamp or msg_time > last_processed_timestamp[convo_sid]:
                        last_processed_timestamp[convo_sid] = msg_time
                        question = latest_msg["body"]
                        sender = latest_msg["author"]
                        print(f"📩 New message from {sender}: {question}")
                        context = "\n\n".join(retrieve_chunks(question, index, embed_model, text_chunks))
                        answer = generate_answer_with_groq(question, context)
                        send_twilio_message(client, convo_sid, answer)
            except Exception as e:
                # A transient Groq/Twilio/network failure must not silently
                # kill this conversation's polling thread.
                print(f"Error while handling conversation {convo_sid}: {e}")
            time.sleep(5)

    for convo in client.conversations.v1.conversations.list():
        if convo.sid not in processed_convos:
            processed_convos.add(convo.sid)
            threading.Thread(target=poll_convo, args=(convo.sid,), daemon=True).start()
228
+
229
+ # ---------------- Main Entry ----------------
230
if __name__ == "__main__":
    # Streamlit page setup and one-time knowledge-base build.
    st.title("🤖 ToyBot WhatsApp Assistant")
    st.write("Initializing knowledge base...")

    index, model, chunks = setup_knowledge_base()

    st.success("Knowledge base loaded.")
    st.write("Waiting for WhatsApp messages...")

    # Start the Twilio conversation monitor only when credentials are present.
    account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
    auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
    if account_sid and auth_token:
        client = Client(account_sid, auth_token)
        start_conversation_monitor(client, index, model, chunks)
        st.info("✅ Bot is now monitoring Twilio conversations.")
    else:
        st.error("❌ Twilio credentials not set.")