masadonline committed on
Commit 30db2dc · verified · 1 Parent(s): f02c9f6

Update app.py

Files changed (1)
  1. app.py +110 -166
app.py CHANGED
@@ -1,38 +1,34 @@
  import os
  import time
  import threading
  import streamlit as st
- from twilio.rest import Client
- from sentence_transformers import SentenceTransformer
- from transformers import AutoTokenizer
- import faiss
  import numpy as np
- import docx
- from groq import Groq
  import requests
- from io import StringIO
  from pdfminer.high_level import extract_text_to_fp
  from pdfminer.layout import LAParams
  from twilio.base.exceptions import TwilioRestException
- import pdfplumber
- import datetime
- import csv
- import json
- import re

  APP_START_TIME = datetime.datetime.now(datetime.timezone.utc)
  os.environ["PYTORCH_JIT"] = "0"

- # ---------------- PDF & DOCX & JSON Extraction ----------------
  def _extract_tables_from_page(page):
      tables = page.extract_tables()
      formatted_tables = []
      for table in tables:
-         formatted_table = []
-         for row in table:
-             formatted_row = [cell if cell is not None else "" for cell in row]
-             formatted_table.append(formatted_row)
-         formatted_tables.append(formatted_table)
      return formatted_tables

  def extract_text_from_pdf(pdf_path):
@@ -46,55 +42,48 @@ def extract_text_from_pdf(pdf_path):
              if text:
                  text_output.write(text + "\n\n")
      except Exception as e:
-         print(f"pdfplumber error: {e}")
          with open(pdf_path, 'rb') as file:
              extract_text_to_fp(file, text_output, laparams=LAParams(), output_type='text')
      return text_output.getvalue(), all_tables

- def _format_tables_internal(tables):
-     formatted_tables_str = []
-     for table in tables:
-         with StringIO() as csvfile:
-             writer = csv.writer(csvfile)
-             writer.writerows(table)
-             formatted_tables_str.append(csvfile.getvalue())
-     return "\n\n".join(formatted_tables_str)
-
- def clean_extracted_text(text):
-     return '\n'.join(' '.join(line.strip().split()) for line in text.splitlines() if line.strip())
-
  def extract_text_from_docx(docx_path):
      try:
          doc = docx.Document(docx_path)
-         return '\n'.join(para.text for para in doc.paragraphs)
-     except:
          return ""

  def load_json_data(json_path):
      try:
          with open(json_path, 'r', encoding='utf-8') as f:
              data = json.load(f)
          if isinstance(data, dict):
-             # Flatten dictionary values (avoiding nested structures as strings)
-             return "\n".join(f"{key}: {value}" for key, value in data.items() if not isinstance(value, (dict, list)))
-         elif isinstance(data, list):
-             # Flatten list of dictionaries
-             all_items = []
-             for item in data:
-                 if isinstance(item, dict):
-                     all_items.append("\n".join(f"{key}: {value}" for key, value in item.items() if not isinstance(value, (dict, list))))
-             return "\n\n".join(all_items)
-         else:
-             return json.dumps(data, ensure_ascii=False, indent=2)
      except Exception as e:
-         print(f"JSON read error: {e}")
          return ""

- # ---------------- Chunking ----------------
  def chunk_text(text, tokenizer, chunk_size=128, chunk_overlap=32):
      tokens = tokenizer.tokenize(text)
-     chunks = []
-     start = 0
      while start < len(tokens):
          end = min(start + chunk_size, len(tokens))
          chunk = tokens[start:end]
@@ -106,9 +95,9 @@ def chunk_text(text, tokenizer, chunk_size=128, chunk_overlap=32):
  def retrieve_chunks(question, index, embed_model, text_chunks, k=3):
      q_embedding = embed_model.encode(question)
      D, I = index.search(np.array([q_embedding]), k)
-     return [text_chunks[i] for i in I[0]]

- # ---------------- Groq Answer Generator ----------------
  def generate_answer_with_groq(question, context):
      url = "https://api.groq.com/openai/v1/chat/completions"
      api_key = os.environ.get("GROQ_API_KEY")
@@ -116,131 +105,86 @@ def generate_answer_with_groq(question, context):
          "Authorization": f"Bearer {api_key}",
          "Content-Type": "application/json",
      }
      prompt = (
          f"Customer asked: '{question}'\n\n"
-         f"Here is the relevant information to help:\n{context}\n\n"
-         f"Respond in a friendly and helpful tone as a toy shop support agent, "
-         f"addressing the customer by their name if it's available in the context."
      )
      payload = {
          "model": "llama3-8b-8192",
          "messages": [
              {
                  "role": "system",
-                 "content": (
-                     "You are ToyBot, a friendly WhatsApp assistant for an online toy shop. "
-                     "Help customers with toys, delivery, and returns in a helpful tone. "
-                     "When responding, try to find the customer's name in the provided context "
-                     "and address them directly. If the context contains order details and status, "
-                     "include that information in your response."
-                 )
              },
-             {"role": "user", "content": prompt},
-         ],
-         "temperature": 0.5,
-         "max_tokens": 300,
      }
-     response = requests.post(url, headers=headers, json=payload)
-     response.raise_for_status()
-     return response.json()['choices'][0]['message']['content'].strip()

- # ---------------- Twilio Integration ----------------
- def fetch_latest_incoming_message(client, conversation_sid):
-     try:
-         messages = client.conversations.v1.conversations(conversation_sid).messages.list()
-         for msg in reversed(messages):
-             if msg.author.startswith("whatsapp:"):
-                 return {
-                     "sid": msg.sid,
-                     "body": msg.body,
-                     "author": msg.author,
-                     "timestamp": msg.date_created,
-                 }
-     except TwilioRestException as e:
-         print(f"Twilio error: {e}")
-     return None
-
- def send_twilio_message(client, conversation_sid, body):
-     return client.conversations.v1.conversations(conversation_sid).messages.create(
-         author="system", body=body
-     )

- # ---------------- Knowledge Base Setup ----------------
- def setup_knowledge_base():
-     folder_path = "docs"
-     all_text = ""
-
-     for filename in os.listdir(folder_path):
-         file_path = os.path.join(folder_path, filename)
-         if filename.endswith(".pdf"):
-             text, tables = extract_text_from_pdf(file_path)
-             all_text += clean_extracted_text(text) + "\n"
-             all_text += _format_tables_internal(tables) + "\n"
-         elif filename.endswith(".docx"):
-             text = extract_text_from_docx(file_path)
-             all_text += clean_extracted_text(text) + "\n"
-         elif filename.endswith(".json"):
-             text = load_json_data(file_path)
-             all_text += text + "\n"
-         elif filename.endswith(".csv"):
-             try:
-                 with open(file_path, newline='', encoding='utf-8') as csvfile:
-                     reader = csv.DictReader(csvfile)
-                     for row in reader:
-                         line = ' | '.join(f"{k}: {v}" for k, v in row.items())
-                         all_text += line + "\n"
-             except Exception as e:
-                 print(f"CSV read error: {e}")
-
-     tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
-     chunks = chunk_text(all_text, tokenizer)
-     model = SentenceTransformer('all-mpnet-base-v2')
-     embeddings = model.encode(chunks, show_progress_bar=False)
-     dim = embeddings[0].shape[0]
-     index = faiss.IndexFlatL2(dim)
-     index.add(np.array(embeddings).astype('float32'))
-     return index, model, chunks
-
- # ---------------- Monitor Twilio Conversations ----------------
- def start_conversation_monitor(client, index, embed_model, text_chunks):
-     processed_convos = set()
-     last_processed_timestamp = {}
-
-     def poll_convo(convo_sid):
-         while True:
-             latest_msg = fetch_latest_incoming_message(client, convo_sid)
-             if latest_msg:
-                 msg_time = latest_msg["timestamp"]
-                 if convo_sid not in last_processed_timestamp or msg_time > last_processed_timestamp[convo_sid]:
-                     last_processed_timestamp[convo_sid] = msg_time
-                     question = latest_msg["body"]
-                     sender = latest_msg["author"]
-                     print(f"📩 New message from {sender}: {question}")
-                     context = "\n\n".join(retrieve_chunks(question, index, embed_model, text_chunks))
-                     answer = generate_answer_with_groq(question, context)
-                     send_twilio_message(client, convo_sid, answer)
-             time.sleep(5)
-
-     for convo in client.conversations.v1.conversations.list():
-         if convo.sid not in processed_convos:
-             processed_convos.add(convo.sid)
-             threading.Thread(target=poll_convo, args=(convo.sid,), daemon=True).start()
-
- # ---------------- Main Entry ----------------
- if __name__ == "__main__":
-     st.title("🤖 ToyBot WhatsApp Assistant")
-     st.write("Initializing knowledge base...")
-
-     index, model, chunks = setup_knowledge_base()
-
-     st.success("Knowledge base loaded.")
-     st.write("Waiting for WhatsApp messages...")
-
-     account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
-     auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
-     if not account_sid or not auth_token:
-         st.error("❌ Twilio credentials not set.")
-     else:
-         client = Client(account_sid, auth_token)
-         start_conversation_monitor(client, index, model, chunks)
-         st.info("✅ Bot is now monitoring Twilio conversations.")
 
  import os
+ import json
  import time
  import threading
+ import datetime
+ import csv
+ import docx
  import streamlit as st
+ from io import StringIO
  import numpy as np
  import requests
+ import pdfplumber
  from pdfminer.high_level import extract_text_to_fp
  from pdfminer.layout import LAParams
+ from sentence_transformers import SentenceTransformer
+ from transformers import AutoTokenizer
+ import faiss
+ from twilio.rest import Client
  from twilio.base.exceptions import TwilioRestException

+ # Start time for filtering incoming messages
  APP_START_TIME = datetime.datetime.now(datetime.timezone.utc)
  os.environ["PYTORCH_JIT"] = "0"

+ # ---------------- PDF / DOCX / JSON LOADERS ----------------
  def _extract_tables_from_page(page):
      tables = page.extract_tables()
      formatted_tables = []
      for table in tables:
+         formatted_row = [[cell if cell else "" for cell in row] for row in table]
+         formatted_tables.append(formatted_row)
      return formatted_tables

  def extract_text_from_pdf(pdf_path):
 
              if text:
                  text_output.write(text + "\n\n")
      except Exception as e:
+         print(f"[pdfplumber error] Falling back: {e}")
          with open(pdf_path, 'rb') as file:
              extract_text_to_fp(file, text_output, laparams=LAParams(), output_type='text')
      return text_output.getvalue(), all_tables

  def extract_text_from_docx(docx_path):
      try:
          doc = docx.Document(docx_path)
+         return "\n".join(para.text for para in doc.paragraphs)
+     except Exception as e:
+         print(f"[DOCX error] {e}")
          return ""

  def load_json_data(json_path):
      try:
          with open(json_path, 'r', encoding='utf-8') as f:
              data = json.load(f)
+         if isinstance(data, list):
+             return "\n\n".join("\n".join(f"{k}: {v}" for k, v in d.items()) for d in data if isinstance(d, dict))
          if isinstance(data, dict):
+             return "\n".join(f"{k}: {v}" for k, v in data.items())
+         return str(data)
      except Exception as e:
+         print(f"[JSON error] {e}")
          return ""

+ def _format_tables_internal(tables):
+     formatted = []
+     for table in tables:
+         with StringIO() as csvfile:
+             writer = csv.writer(csvfile)
+             writer.writerows(table)
+             formatted.append(csvfile.getvalue())
+     return "\n\n".join(formatted)
+
+ def clean_extracted_text(text):
+     return '\n'.join(' '.join(line.strip().split()) for line in text.splitlines() if line.strip())
+
+ # ---------------- CHUNKING ----------------
  def chunk_text(text, tokenizer, chunk_size=128, chunk_overlap=32):
      tokens = tokenizer.tokenize(text)
+     chunks, start = [], 0
      while start < len(tokens):
          end = min(start + chunk_size, len(tokens))
          chunk = tokens[start:end]
 
  def retrieve_chunks(question, index, embed_model, text_chunks, k=3):
      q_embedding = embed_model.encode(question)
      D, I = index.search(np.array([q_embedding]), k)
+     return [text_chunks[i] for i in I[0] if i < len(text_chunks)]

+ # ---------------- GROQ ANSWER GENERATOR ----------------
  def generate_answer_with_groq(question, context):
      url = "https://api.groq.com/openai/v1/chat/completions"
      api_key = os.environ.get("GROQ_API_KEY")
 
          "Authorization": f"Bearer {api_key}",
          "Content-Type": "application/json",
      }
+
      prompt = (
          f"Customer asked: '{question}'\n\n"
+         f"Here is the relevant information:\n{context}\n\n"
+         "Reply as a friendly toy shop assistant, include customer name from the context if possible."
      )
+
      payload = {
          "model": "llama3-8b-8192",
          "messages": [
              {
                  "role": "system",
+                 "content": "You are ToyBot, a helpful assistant for an online toy shop."
              },
+             {"role": "user", "content": prompt}
+         ]
      }

+     response = requests.post(url, headers=headers, json=payload)
+     return response.json()["choices"][0]["message"]["content"]
+
+ # ---------------- TWILIO MONITOR ----------------
+ def handle_incoming_messages(index, embed_model, tokenizer, text_chunks):
+     client = Client(os.environ["TWILIO_ACCOUNT_SID"], os.environ["TWILIO_AUTH_TOKEN"])
+     conversation_sid = os.environ.get("TWILIO_CONVERSATION_SID")
+
+     if not conversation_sid:
+         print("❌ TWILIO_CONVERSATION_SID not set")
+         return
+
+     last_check_time = APP_START_TIME
+
+     while True:
+         try:
+             messages = client.conversations.conversations(conversation_sid).messages.list(order="asc")
+             for msg in messages:
+                 msg_time = msg.date_created.replace(tzinfo=datetime.timezone.utc)
+                 if msg_time > last_check_time:
+                     print(f"📩 New message from {msg.author}: {msg.body}")
+                     answer = generate_answer_with_groq(msg.body, "\n".join(retrieve_chunks(msg.body, index, embed_model, text_chunks)))
+                     client.conversations.conversations(conversation_sid).messages.create(author="ToyBot", body=answer)
+                     last_check_time = datetime.datetime.now(datetime.timezone.utc)
+         except TwilioRestException as e:
+             print(f"[Twilio error] {e}")
+         time.sleep(10)
+
+ # ---------------- STREAMLIT UI ----------------
+ st.title("🎁 ToyShop Assistant – RAG WhatsApp Bot")
+
+ uploaded_files = st.file_uploader("📎 Upload your documents (PDF, DOCX, JSON)", accept_multiple_files=True)
+ if uploaded_files:
+     full_text = ""
+     all_tables = []

+     for file in uploaded_files:
+         ext = file.name.lower().split(".")[-1]
+         if ext == "pdf":
+             text, tables = extract_text_from_pdf(file)
+             all_tables.extend(tables)
+         elif ext == "docx":
+             text = extract_text_from_docx(file)
+         elif ext == "json":
+             text = load_json_data(file)
+         else:
+             text = file.read().decode("utf-8")
+         full_text += clean_extracted_text(text) + "\n\n"
+
+     # Load models
+     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+     embed_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
+     chunks = chunk_text(full_text, tokenizer)
+     embeddings = embed_model.encode(chunks)
+
+     index = faiss.IndexFlatL2(embeddings.shape[1])
+     index.add(np.array(embeddings))
+
+     # Start listener thread
+     if "listener_started" not in st.session_state:
+         threading.Thread(target=handle_incoming_messages, args=(index, embed_model, tokenizer, chunks), daemon=True).start()
+         st.session_state.listener_started = True
+         st.success("✅ WhatsApp listener started.")
+
+     st.success(f"📚 Knowledge base built with {len(chunks)} chunks")
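
For reference, the sliding-window chunking above can be exercised on its own. The hunk hides chunk_text's loop tail (new lines 90-94), so the sketch below fills it in with the obvious append-and-advance step; treat that tail, and the sample text, as assumptions rather than the committed code:

from transformers import AutoTokenizer

def chunk_text(text, tokenizer, chunk_size=128, chunk_overlap=32):
    tokens = tokenizer.tokenize(text)
    chunks, start = [], 0
    while start < len(tokens):
        end = min(start + chunk_size, len(tokens))
        chunk = tokens[start:end]
        # Assumed tail: detokenize the window, then advance by size - overlap.
        chunks.append(tokenizer.convert_tokens_to_string(chunk))
        start += chunk_size - chunk_overlap
    return chunks

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
sample = "Wooden trains ship in three days. " * 50  # hypothetical document text
print(len(chunk_text(sample, tokenizer)))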
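Similarly, a minimal self-contained sketch of the embed-index-retrieve path the new UI wires together (same embedding model and IndexFlatL2 as the diff; the sample chunks and query are hypothetical):

import faiss
import numpy as np
from sentence_transformers import SentenceTransformer

embed_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")

# Hypothetical stand-ins for text extracted from uploaded documents.
chunks = [
    "Order 1234: wooden train set, shipped May 1.",
    "Returns are accepted within 30 days of delivery.",
    "Delivery usually takes 3-5 business days.",
]
embeddings = embed_model.encode(chunks)         # float32 array, one row per chunk

index = faiss.IndexFlatL2(embeddings.shape[1])  # exact L2 index, as in the commit
index.add(np.array(embeddings))

q = embed_model.encode("How long does delivery take?")
D, I = index.search(np.array([q]), 2)           # top-2 nearest chunks
print([chunks[i] for i in I[0] if i < len(chunks)])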
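Finally, a standalone sketch of the Groq chat-completions call. Endpoint, model, and payload shape are as committed; the timeout and the raise_for_status() guard are added assumptions (the committed code indexes response.json() directly):

import os
import requests

def ask_groq(question, context):
    # Hypothetical helper mirroring generate_answer_with_groq.
    resp = requests.post(
        "https://api.groq.com/openai/v1/chat/completions",
        headers={
            "Authorization": f"Bearer {os.environ['GROQ_API_KEY']}",
            "Content-Type": "application/json",
        },
        json={
            "model": "llama3-8b-8192",
            "messages": [
                {"role": "system", "content": "You are ToyBot, a helpful assistant for an online toy shop."},
                {"role": "user", "content": f"Customer asked: '{question}'\n\nContext:\n{context}"},
            ],
        },
        timeout=30,           # assumption: not in the commit
    )
    resp.raise_for_status()   # assumption: surfaces HTTP errors before indexing "choices"
    return resp.json()["choices"][0]["message"]["content"]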