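"""Quasa: a WhatsApp support chatbot for an online toy shop.

Pipeline: extract text from the PDF/DOCX files in docs/, chunk and embed it,
index the embeddings with FAISS, then poll Twilio Conversations for new
WhatsApp messages and answer them with a Groq-hosted Llama 3 model using the
retrieved chunks as context. A Streamlit page collects credentials and
reports status.
"""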
import os
import time
import threading
import streamlit as st
from twilio.rest import Client
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer
import faiss
import numpy as np
import docx
import requests
from io import StringIO
from pdfminer.high_level import extract_text_to_fp
from pdfminer.layout import LAParams
# --- PDF Extraction (Improved for Tables & Paragraphs) ---
def extract_text_from_pdf(pdf_path):
    output_string = StringIO()
    with open(pdf_path, 'rb') as file:
        extract_text_to_fp(file, output_string, laparams=LAParams(), output_type='text', codec=None)
    return output_string.getvalue()
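# Note: passing LAParams() turns on pdfminer's layout analysis, which groups
# characters into lines and text boxes; that grouping is what keeps paragraph
# and table text readable instead of arriving as a raw character stream.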
def clean_extracted_text(text):
    lines = text.splitlines()
    cleaned = []
    for line in lines:
        line = line.strip()
        if line:
            line = ' '.join(line.split())  # collapse runs of internal whitespace
            cleaned.append(line)
    return '\n'.join(cleaned)
# --- DOCX Extraction ---
def extract_text_from_docx(docx_path):
    try:
        doc = docx.Document(docx_path)
        return '\n'.join(para.text for para in doc.paragraphs)
    except Exception:
        # python-docx cannot open legacy binary .doc files; fall back to an
        # empty string rather than aborting the whole knowledge-base build.
        return ""
# --- Chunking & Retrieval ---
def chunk_text(text, tokenizer, chunk_size=128, chunk_overlap=32, max_tokens=512):
    chunk_size = min(chunk_size, max_tokens)  # stay within the encoder's input limit
    tokens = tokenizer.tokenize(text)
    chunks = []
    start = 0
    while start < len(tokens):
        end = min(start + chunk_size, len(tokens))
        chunks.append(tokenizer.convert_tokens_to_string(tokens[start:end]))
        if end == len(tokens):
            break
        start += chunk_size - chunk_overlap
    return chunks
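# Example: with chunk_size=128 and chunk_overlap=32 the window advances by 96
# tokens per step, so a 300-token document yields three chunks covering token
# ranges 0-128, 96-224, and 192-300, each sharing 32 tokens with its neighbor.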
def retrieve_chunks(question, index, embed_model, text_chunks, k=3):
    question_embedding = embed_model.encode(question)
    # FAISS expects a float32 matrix of shape (n_queries, dim)
    D, I = index.search(np.array([question_embedding], dtype='float32'), k)
    return [text_chunks[i] for i in I[0]]
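# IndexFlatL2.search returns squared L2 distances (D) and the row indices of
# the k nearest stored vectors (I); only the indices are needed here to look
# up the matching text chunks.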
# --- Groq Answer Generator ---
def generate_answer_with_groq(question, context):
    url = "https://api.groq.com/openai/v1/chat/completions"
    api_key = os.environ.get("GROQ_API_KEY")
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
    prompt = (
        f"Customer asked: '{question}'\n\n"
        f"Here is the relevant product or policy info to help:\n{context}\n\n"
        f"Respond in a friendly and helpful tone as a toy shop support agent."
    )
    payload = {
        "model": "llama3-8b-8192",
        "messages": [
            {
                "role": "system",
                "content": (
                    "You are ToyBot, a friendly and helpful WhatsApp assistant for an online toy shop. "
                    "Your goal is to politely answer customer questions, help them choose the right toys, "
                    "provide order or delivery information, explain return policies, and guide them through purchases."
                )
            },
            {"role": "user", "content": prompt},
        ],
        "temperature": 0.5,
        "max_tokens": 300,
    }
    response = requests.post(url, headers=headers, json=payload, timeout=30)
    response.raise_for_status()
    return response.json()['choices'][0]['message']['content'].strip()
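# Groq's endpoint is OpenAI-compatible, so the payload above follows the
# standard chat-completions schema; the official groq client would work just
# as well, but a plain requests call keeps the dependency surface small.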
# --- Twilio Functions ---
def get_whatsapp_conversation_sids(client):
    sids = []
    conversations = client.conversations.v1.conversations.list(limit=50)
    for convo in conversations:
        try:
            participants = client.conversations.v1.conversations(convo.sid).participants.list()
            for p in participants:
                if (p.identity and p.identity.startswith("whatsapp:")) or (
                    p.messaging_binding and p.messaging_binding.get("address", "").startswith("whatsapp:")
                ):
                    sids.append(convo.sid)
                    break
        except Exception:
            # Skip conversations whose participants cannot be listed.
            continue
    return sids
def fetch_latest_incoming_message(client, conversation_sid):
    messages = client.conversations.v1.conversations(conversation_sid).messages.list(limit=10)
    for msg in reversed(messages):
        if msg.author and msg.author.startswith("whatsapp:"):
            return {
                "sid": msg.sid,
                "body": msg.body,
                "author": msg.author,
                "timestamp": msg.date_created,
            }
    return None
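# Filtering on the "whatsapp:" author prefix means the bot's own replies
# (sent with author="system" below) are never mistaken for customer messages,
# which prevents a reply loop.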
def send_twilio_message(client, conversation_sid, body):
    return client.conversations.v1.conversations(conversation_sid).messages.create(
        author="system", body=body
    )
# --- Load Knowledge Base ---
def setup_knowledge_base():
    folder_path = "docs"
    all_text = ""
    for file in os.listdir(folder_path):
        path = os.path.join(folder_path, file)
        if file.endswith(".pdf"):
            raw_text = extract_text_from_pdf(path)
            all_text += clean_extracted_text(raw_text) + "\n"
        elif file.endswith((".docx", ".doc")):
            all_text += extract_text_from_docx(path) + "\n"
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    chunks = chunk_text(all_text, tokenizer)
    model = SentenceTransformer('all-mpnet-base-v2')
    # encode() truncates inputs to the model's max sequence length on its own.
    embeddings = model.encode(chunks, show_progress_bar=False)
    dim = embeddings[0].shape[0]
    index = faiss.IndexFlatL2(dim)
    index.add(np.array(embeddings).astype('float32'))
    return index, model, chunks
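# Optional: for larger corpora the index can be persisted between runs instead
# of re-embedding on every start. A minimal sketch (the file name is
# illustrative):
#     faiss.write_index(index, "kb.index")   # save after building
#     index = faiss.read_index("kb.index")   # load on the next start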
# --- Monitor Conversations ---
def start_conversation_monitor(client, index, embed_model, text_chunks):
    monitored_sids = set()

    def poll_conversation(convo_sid):
        last_processed_timestamp = None
        while True:
            try:
                latest_msg = fetch_latest_incoming_message(client, convo_sid)
                if latest_msg:
                    msg_time = latest_msg["timestamp"]
                    if last_processed_timestamp is None or msg_time > last_processed_timestamp:
                        last_processed_timestamp = msg_time
                        question = latest_msg["body"]
                        sender = latest_msg["author"]
                        print(f"\n📥 New message from {sender} in {convo_sid}: {question}")
                        context = "\n\n".join(retrieve_chunks(question, index, embed_model, text_chunks))
                        answer = generate_answer_with_groq(question, context)
                        send_twilio_message(client, convo_sid, answer)
                        print(f"🤖 Replied to {sender}: {answer}")
                time.sleep(3)
            except Exception as e:
                print(f"❌ Error in convo {convo_sid} polling:", e)
                time.sleep(5)

    def monitor_all_conversations():
        while True:
            try:
                current_sids = set(get_whatsapp_conversation_sids(client))
                new_sids = current_sids - monitored_sids
                for sid in new_sids:
                    print(f"➡️ Monitoring new conversation: {sid}")
                    monitored_sids.add(sid)
                    threading.Thread(target=poll_conversation, args=(sid,), daemon=True).start()
                time.sleep(15)
            except Exception as e:
                print("❌ Error in conversation monitoring loop:", e)
                time.sleep(15)

    threading.Thread(target=monitor_all_conversations, daemon=True).start()
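# Threading notes: every thread is a daemon, so they all exit with the
# Streamlit process. One caveat of the timestamp check above: on startup the
# most recent existing customer message in each conversation is treated as new
# and gets one reply; seeding last_processed_timestamp with the current time
# would avoid that if it is unwanted.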
# --- Streamlit UI ---
st.set_page_config(page_title="Quasa – A Smart WhatsApp Chatbot", layout="wide")
st.title("📱 Quasa – A Smart WhatsApp Chatbot")
account_sid = st.secrets.get("TWILIO_SID")
auth_token = st.secrets.get("TWILIO_TOKEN")
GROQ_API_KEY = st.secrets.get("GROQ_API_KEY")
if not all([account_sid, auth_token, GROQ_API_KEY]):
    st.warning("⚠️ Provide all credentials below:")
    account_sid = st.text_input("Twilio SID", value=account_sid or "")
    auth_token = st.text_input("Twilio Token", type="password", value=auth_token or "")
    GROQ_API_KEY = st.text_input("GROQ API Key", type="password", value=GROQ_API_KEY or "")
if all([account_sid, auth_token, GROQ_API_KEY]):
    os.environ["GROQ_API_KEY"] = GROQ_API_KEY
    client = Client(account_sid, auth_token)
    conversation_sids = get_whatsapp_conversation_sids(client)
    if conversation_sids:
        st.success(f"✅ {len(conversation_sids)} WhatsApp conversation(s) found. Initializing chatbot...")
        index, model, chunks = setup_knowledge_base()
        start_conversation_monitor(client, index, model, chunks)
        st.success("🟢 Chatbot is running in background and will reply to new messages.")
    else:
        st.error("❌ No WhatsApp conversations found.")
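# To run locally (the script name app.py is an assumption):
#     streamlit run app.py
# st.secrets reads credentials from .streamlit/secrets.toml, e.g.:
#     TWILIO_SID = "ACxxxxxxxxxxxxxxxx"
#     TWILIO_TOKEN = "your-twilio-auth-token"
#     GROQ_API_KEY = "your-groq-api-key"
# If no secrets are configured, the text inputs above collect them instead.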